From 554fd8c5195424bdbcabf5de30fdc183aba391bd Mon Sep 17 00:00:00 2001 From: upstream source tree Date: Sun, 15 Mar 2015 20:14:05 -0400 Subject: obtained gcc-4.6.4.tar.bz2 from upstream website; verified gcc-4.6.4.tar.bz2.sig; imported gcc-4.6.4 source tree from verified upstream tarball. downloading a git-generated archive based on the 'upstream' tag should provide you with a source tree that is binary identical to the one extracted from the above tarball. if you have obtained the source via the command 'git clone', however, do note that line-endings of files in your working directory might differ from line-endings of the respective files in the upstream repository. --- gcc/config/rs6000/40x.md | 120 + gcc/config/rs6000/440.md | 133 + gcc/config/rs6000/476.md | 142 + gcc/config/rs6000/603.md | 143 + gcc/config/rs6000/6xx.md | 275 + gcc/config/rs6000/7450.md | 185 + gcc/config/rs6000/750cl.h | 30 + gcc/config/rs6000/7xx.md | 184 + gcc/config/rs6000/8540.md | 250 + gcc/config/rs6000/a2.md | 134 + gcc/config/rs6000/aix-stdint.h | 51 + gcc/config/rs6000/aix.h | 260 + gcc/config/rs6000/aix43.h | 185 + gcc/config/rs6000/aix51.h | 189 + gcc/config/rs6000/aix52.h | 199 + gcc/config/rs6000/aix53.h | 199 + gcc/config/rs6000/aix61.h | 200 + gcc/config/rs6000/aix64.opt | 38 + gcc/config/rs6000/altivec.h | 493 + gcc/config/rs6000/altivec.md | 2749 +++ gcc/config/rs6000/biarch64.h | 26 + gcc/config/rs6000/cell.md | 400 + gcc/config/rs6000/constraints.md | 201 + gcc/config/rs6000/crtresfpr.asm | 81 + gcc/config/rs6000/crtresgpr.asm | 81 + gcc/config/rs6000/crtresxfpr.asm | 126 + gcc/config/rs6000/crtresxgpr.asm | 124 + gcc/config/rs6000/crtsavfpr.asm | 81 + gcc/config/rs6000/crtsavgpr.asm | 81 + gcc/config/rs6000/darwin-asm.h | 51 + gcc/config/rs6000/darwin-fallback.c | 487 + gcc/config/rs6000/darwin-fpsave.asm | 92 + gcc/config/rs6000/darwin-ldouble-format | 91 + gcc/config/rs6000/darwin-ldouble.c | 438 + gcc/config/rs6000/darwin-libgcc.10.4.ver | 93 + 
gcc/config/rs6000/darwin-libgcc.10.5.ver | 106 + gcc/config/rs6000/darwin-tramp.asm | 125 + gcc/config/rs6000/darwin-unwind.h | 30 + gcc/config/rs6000/darwin-vecsave.asm | 155 + gcc/config/rs6000/darwin-world.asm | 259 + gcc/config/rs6000/darwin.h | 438 + gcc/config/rs6000/darwin.md | 442 + gcc/config/rs6000/darwin.opt | 42 + gcc/config/rs6000/darwin64.h | 35 + gcc/config/rs6000/darwin7.h | 30 + gcc/config/rs6000/darwin8.h | 32 + gcc/config/rs6000/default64.h | 24 + gcc/config/rs6000/dfp.md | 594 + gcc/config/rs6000/driver-rs6000.c | 547 + gcc/config/rs6000/e300c2c3.md | 189 + gcc/config/rs6000/e500-double.h | 24 + gcc/config/rs6000/e500.h | 57 + gcc/config/rs6000/e500crtres32gpr.asm | 73 + gcc/config/rs6000/e500crtres64gpr.asm | 73 + gcc/config/rs6000/e500crtres64gprctr.asm | 90 + gcc/config/rs6000/e500crtrest32gpr.asm | 75 + gcc/config/rs6000/e500crtrest64gpr.asm | 74 + gcc/config/rs6000/e500crtresx32gpr.asm | 75 + gcc/config/rs6000/e500crtresx64gpr.asm | 75 + gcc/config/rs6000/e500crtsav32gpr.asm | 73 + gcc/config/rs6000/e500crtsav64gpr.asm | 72 + gcc/config/rs6000/e500crtsav64gprctr.asm | 91 + gcc/config/rs6000/e500crtsavg32gpr.asm | 73 + gcc/config/rs6000/e500crtsavg64gpr.asm | 73 + gcc/config/rs6000/e500crtsavg64gprctr.asm | 90 + gcc/config/rs6000/e500mc.md | 200 + gcc/config/rs6000/e500mc64.md | 191 + gcc/config/rs6000/eabi-ci.asm | 113 + gcc/config/rs6000/eabi-cn.asm | 104 + gcc/config/rs6000/eabi.asm | 289 + gcc/config/rs6000/eabi.h | 44 + gcc/config/rs6000/eabialtivec.h | 30 + gcc/config/rs6000/eabisim.h | 54 + gcc/config/rs6000/eabispe.h | 54 + gcc/config/rs6000/freebsd.h | 80 + gcc/config/rs6000/gnu.h | 37 + gcc/config/rs6000/host-darwin.c | 154 + gcc/config/rs6000/host-ppc64-darwin.c | 30 + gcc/config/rs6000/libgcc-ppc-glibc.ver | 73 + gcc/config/rs6000/libgcc-ppc64.ver | 7 + gcc/config/rs6000/linux-unwind.h | 355 + gcc/config/rs6000/linux.h | 134 + gcc/config/rs6000/linux64.h | 569 + gcc/config/rs6000/linux64.opt | 28 + 
gcc/config/rs6000/linuxaltivec.h | 30 + gcc/config/rs6000/linuxspe.h | 44 + gcc/config/rs6000/lynx.h | 125 + gcc/config/rs6000/milli.exp | 7 + gcc/config/rs6000/mpc.md | 111 + gcc/config/rs6000/netbsd.h | 93 + gcc/config/rs6000/option-defaults.h | 64 + gcc/config/rs6000/paired.h | 75 + gcc/config/rs6000/paired.md | 527 + gcc/config/rs6000/power4.md | 410 + gcc/config/rs6000/power5.md | 308 + gcc/config/rs6000/power6.md | 573 + gcc/config/rs6000/power7.md | 318 + gcc/config/rs6000/ppc-asm.h | 358 + gcc/config/rs6000/ppc64-fp.c | 239 + gcc/config/rs6000/ppu_intrinsics.h | 727 + gcc/config/rs6000/predicates.md | 1423 ++ gcc/config/rs6000/rios1.md | 191 + gcc/config/rs6000/rios2.md | 129 + gcc/config/rs6000/rs6000-builtin.def | 1020 + gcc/config/rs6000/rs6000-c.c | 3772 ++++ gcc/config/rs6000/rs6000-modes.def | 41 + gcc/config/rs6000/rs6000-opts.h | 144 + gcc/config/rs6000/rs6000-protos.h | 198 + gcc/config/rs6000/rs6000.c | 28250 ++++++++++++++++++++++++++++ gcc/config/rs6000/rs6000.h | 2439 +++ gcc/config/rs6000/rs6000.md | 16361 ++++++++++++++++ gcc/config/rs6000/rs6000.opt | 464 + gcc/config/rs6000/rs64.md | 154 + gcc/config/rs6000/rtems.h | 56 + gcc/config/rs6000/secureplt.h | 20 + gcc/config/rs6000/sfp-machine.h | 68 + gcc/config/rs6000/si2vmx.h | 2048 ++ gcc/config/rs6000/singlefp.h | 40 + gcc/config/rs6000/sol-ci.asm | 94 + gcc/config/rs6000/sol-cn.asm | 72 + gcc/config/rs6000/spe.h | 1107 ++ gcc/config/rs6000/spe.md | 3190 ++++ gcc/config/rs6000/spu2vmx.h | 2415 +++ gcc/config/rs6000/sync.md | 622 + gcc/config/rs6000/sysv4.h | 1039 + gcc/config/rs6000/sysv4.opt | 145 + gcc/config/rs6000/sysv4le.h | 36 + gcc/config/rs6000/t-aix43 | 95 + gcc/config/rs6000/t-aix52 | 75 + gcc/config/rs6000/t-darwin | 55 + gcc/config/rs6000/t-darwin64 | 12 + gcc/config/rs6000/t-darwin8 | 3 + gcc/config/rs6000/t-fprules | 29 + gcc/config/rs6000/t-fprules-fpbit | 29 + gcc/config/rs6000/t-fprules-softfp | 6 + gcc/config/rs6000/t-freebsd | 24 + gcc/config/rs6000/t-linux | 9 + 
gcc/config/rs6000/t-linux64 | 45 + gcc/config/rs6000/t-lynx | 56 + gcc/config/rs6000/t-netbsd | 90 + gcc/config/rs6000/t-ppccomm | 75 + gcc/config/rs6000/t-ppcendian | 30 + gcc/config/rs6000/t-ppcgas | 33 + gcc/config/rs6000/t-ppcos | 8 + gcc/config/rs6000/t-rs6000 | 71 + gcc/config/rs6000/t-rtems | 82 + gcc/config/rs6000/t-spe | 86 + gcc/config/rs6000/t-vxworks | 34 + gcc/config/rs6000/t-vxworksae | 5 + gcc/config/rs6000/t-xilinx | 56 + gcc/config/rs6000/titan.md | 171 + gcc/config/rs6000/tramp.asm | 107 + gcc/config/rs6000/vec_types.h | 52 + gcc/config/rs6000/vector.md | 1175 ++ gcc/config/rs6000/vsx.md | 1152 ++ gcc/config/rs6000/vxworks.h | 146 + gcc/config/rs6000/vxworksae.h | 23 + gcc/config/rs6000/x-aix | 6 + gcc/config/rs6000/x-darwin | 5 + gcc/config/rs6000/x-darwin64 | 5 + gcc/config/rs6000/x-linux-relax | 2 + gcc/config/rs6000/x-rs6000 | 3 + gcc/config/rs6000/xcoff.h | 333 + gcc/config/rs6000/xfpu.h | 26 + gcc/config/rs6000/xfpu.md | 140 + gcc/config/rs6000/xilinx.h | 47 + gcc/config/rs6000/xilinx.opt | 33 + 167 files changed, 89275 insertions(+) create mode 100644 gcc/config/rs6000/40x.md create mode 100644 gcc/config/rs6000/440.md create mode 100644 gcc/config/rs6000/476.md create mode 100644 gcc/config/rs6000/603.md create mode 100644 gcc/config/rs6000/6xx.md create mode 100644 gcc/config/rs6000/7450.md create mode 100644 gcc/config/rs6000/750cl.h create mode 100644 gcc/config/rs6000/7xx.md create mode 100644 gcc/config/rs6000/8540.md create mode 100644 gcc/config/rs6000/a2.md create mode 100644 gcc/config/rs6000/aix-stdint.h create mode 100644 gcc/config/rs6000/aix.h create mode 100644 gcc/config/rs6000/aix43.h create mode 100644 gcc/config/rs6000/aix51.h create mode 100644 gcc/config/rs6000/aix52.h create mode 100644 gcc/config/rs6000/aix53.h create mode 100644 gcc/config/rs6000/aix61.h create mode 100644 gcc/config/rs6000/aix64.opt create mode 100644 gcc/config/rs6000/altivec.h create mode 100644 gcc/config/rs6000/altivec.md create mode 100644 
gcc/config/rs6000/biarch64.h create mode 100644 gcc/config/rs6000/cell.md create mode 100644 gcc/config/rs6000/constraints.md create mode 100644 gcc/config/rs6000/crtresfpr.asm create mode 100644 gcc/config/rs6000/crtresgpr.asm create mode 100644 gcc/config/rs6000/crtresxfpr.asm create mode 100644 gcc/config/rs6000/crtresxgpr.asm create mode 100644 gcc/config/rs6000/crtsavfpr.asm create mode 100644 gcc/config/rs6000/crtsavgpr.asm create mode 100644 gcc/config/rs6000/darwin-asm.h create mode 100644 gcc/config/rs6000/darwin-fallback.c create mode 100644 gcc/config/rs6000/darwin-fpsave.asm create mode 100644 gcc/config/rs6000/darwin-ldouble-format create mode 100644 gcc/config/rs6000/darwin-ldouble.c create mode 100644 gcc/config/rs6000/darwin-libgcc.10.4.ver create mode 100644 gcc/config/rs6000/darwin-libgcc.10.5.ver create mode 100644 gcc/config/rs6000/darwin-tramp.asm create mode 100644 gcc/config/rs6000/darwin-unwind.h create mode 100644 gcc/config/rs6000/darwin-vecsave.asm create mode 100644 gcc/config/rs6000/darwin-world.asm create mode 100644 gcc/config/rs6000/darwin.h create mode 100644 gcc/config/rs6000/darwin.md create mode 100644 gcc/config/rs6000/darwin.opt create mode 100644 gcc/config/rs6000/darwin64.h create mode 100644 gcc/config/rs6000/darwin7.h create mode 100644 gcc/config/rs6000/darwin8.h create mode 100644 gcc/config/rs6000/default64.h create mode 100644 gcc/config/rs6000/dfp.md create mode 100644 gcc/config/rs6000/driver-rs6000.c create mode 100644 gcc/config/rs6000/e300c2c3.md create mode 100644 gcc/config/rs6000/e500-double.h create mode 100644 gcc/config/rs6000/e500.h create mode 100644 gcc/config/rs6000/e500crtres32gpr.asm create mode 100644 gcc/config/rs6000/e500crtres64gpr.asm create mode 100644 gcc/config/rs6000/e500crtres64gprctr.asm create mode 100644 gcc/config/rs6000/e500crtrest32gpr.asm create mode 100644 gcc/config/rs6000/e500crtrest64gpr.asm create mode 100644 gcc/config/rs6000/e500crtresx32gpr.asm create mode 100644 
gcc/config/rs6000/e500crtresx64gpr.asm create mode 100644 gcc/config/rs6000/e500crtsav32gpr.asm create mode 100644 gcc/config/rs6000/e500crtsav64gpr.asm create mode 100644 gcc/config/rs6000/e500crtsav64gprctr.asm create mode 100644 gcc/config/rs6000/e500crtsavg32gpr.asm create mode 100644 gcc/config/rs6000/e500crtsavg64gpr.asm create mode 100644 gcc/config/rs6000/e500crtsavg64gprctr.asm create mode 100644 gcc/config/rs6000/e500mc.md create mode 100644 gcc/config/rs6000/e500mc64.md create mode 100644 gcc/config/rs6000/eabi-ci.asm create mode 100644 gcc/config/rs6000/eabi-cn.asm create mode 100644 gcc/config/rs6000/eabi.asm create mode 100644 gcc/config/rs6000/eabi.h create mode 100644 gcc/config/rs6000/eabialtivec.h create mode 100644 gcc/config/rs6000/eabisim.h create mode 100644 gcc/config/rs6000/eabispe.h create mode 100644 gcc/config/rs6000/freebsd.h create mode 100644 gcc/config/rs6000/gnu.h create mode 100644 gcc/config/rs6000/host-darwin.c create mode 100644 gcc/config/rs6000/host-ppc64-darwin.c create mode 100644 gcc/config/rs6000/libgcc-ppc-glibc.ver create mode 100644 gcc/config/rs6000/libgcc-ppc64.ver create mode 100644 gcc/config/rs6000/linux-unwind.h create mode 100644 gcc/config/rs6000/linux.h create mode 100644 gcc/config/rs6000/linux64.h create mode 100644 gcc/config/rs6000/linux64.opt create mode 100644 gcc/config/rs6000/linuxaltivec.h create mode 100644 gcc/config/rs6000/linuxspe.h create mode 100644 gcc/config/rs6000/lynx.h create mode 100644 gcc/config/rs6000/milli.exp create mode 100644 gcc/config/rs6000/mpc.md create mode 100644 gcc/config/rs6000/netbsd.h create mode 100644 gcc/config/rs6000/option-defaults.h create mode 100644 gcc/config/rs6000/paired.h create mode 100644 gcc/config/rs6000/paired.md create mode 100644 gcc/config/rs6000/power4.md create mode 100644 gcc/config/rs6000/power5.md create mode 100644 gcc/config/rs6000/power6.md create mode 100644 gcc/config/rs6000/power7.md create mode 100644 gcc/config/rs6000/ppc-asm.h create mode 
100644 gcc/config/rs6000/ppc64-fp.c create mode 100644 gcc/config/rs6000/ppu_intrinsics.h create mode 100644 gcc/config/rs6000/predicates.md create mode 100644 gcc/config/rs6000/rios1.md create mode 100644 gcc/config/rs6000/rios2.md create mode 100644 gcc/config/rs6000/rs6000-builtin.def create mode 100644 gcc/config/rs6000/rs6000-c.c create mode 100644 gcc/config/rs6000/rs6000-modes.def create mode 100644 gcc/config/rs6000/rs6000-opts.h create mode 100644 gcc/config/rs6000/rs6000-protos.h create mode 100644 gcc/config/rs6000/rs6000.c create mode 100644 gcc/config/rs6000/rs6000.h create mode 100644 gcc/config/rs6000/rs6000.md create mode 100644 gcc/config/rs6000/rs6000.opt create mode 100644 gcc/config/rs6000/rs64.md create mode 100644 gcc/config/rs6000/rtems.h create mode 100644 gcc/config/rs6000/secureplt.h create mode 100644 gcc/config/rs6000/sfp-machine.h create mode 100644 gcc/config/rs6000/si2vmx.h create mode 100644 gcc/config/rs6000/singlefp.h create mode 100644 gcc/config/rs6000/sol-ci.asm create mode 100644 gcc/config/rs6000/sol-cn.asm create mode 100644 gcc/config/rs6000/spe.h create mode 100644 gcc/config/rs6000/spe.md create mode 100644 gcc/config/rs6000/spu2vmx.h create mode 100644 gcc/config/rs6000/sync.md create mode 100644 gcc/config/rs6000/sysv4.h create mode 100644 gcc/config/rs6000/sysv4.opt create mode 100644 gcc/config/rs6000/sysv4le.h create mode 100644 gcc/config/rs6000/t-aix43 create mode 100644 gcc/config/rs6000/t-aix52 create mode 100644 gcc/config/rs6000/t-darwin create mode 100644 gcc/config/rs6000/t-darwin64 create mode 100644 gcc/config/rs6000/t-darwin8 create mode 100644 gcc/config/rs6000/t-fprules create mode 100644 gcc/config/rs6000/t-fprules-fpbit create mode 100644 gcc/config/rs6000/t-fprules-softfp create mode 100644 gcc/config/rs6000/t-freebsd create mode 100644 gcc/config/rs6000/t-linux create mode 100644 gcc/config/rs6000/t-linux64 create mode 100644 gcc/config/rs6000/t-lynx create mode 100644 gcc/config/rs6000/t-netbsd 
create mode 100644 gcc/config/rs6000/t-ppccomm create mode 100644 gcc/config/rs6000/t-ppcendian create mode 100644 gcc/config/rs6000/t-ppcgas create mode 100644 gcc/config/rs6000/t-ppcos create mode 100644 gcc/config/rs6000/t-rs6000 create mode 100644 gcc/config/rs6000/t-rtems create mode 100644 gcc/config/rs6000/t-spe create mode 100644 gcc/config/rs6000/t-vxworks create mode 100644 gcc/config/rs6000/t-vxworksae create mode 100644 gcc/config/rs6000/t-xilinx create mode 100644 gcc/config/rs6000/titan.md create mode 100644 gcc/config/rs6000/tramp.asm create mode 100644 gcc/config/rs6000/vec_types.h create mode 100644 gcc/config/rs6000/vector.md create mode 100644 gcc/config/rs6000/vsx.md create mode 100644 gcc/config/rs6000/vxworks.h create mode 100644 gcc/config/rs6000/vxworksae.h create mode 100644 gcc/config/rs6000/x-aix create mode 100644 gcc/config/rs6000/x-darwin create mode 100644 gcc/config/rs6000/x-darwin64 create mode 100644 gcc/config/rs6000/x-linux-relax create mode 100644 gcc/config/rs6000/x-rs6000 create mode 100644 gcc/config/rs6000/xcoff.h create mode 100644 gcc/config/rs6000/xfpu.h create mode 100644 gcc/config/rs6000/xfpu.md create mode 100644 gcc/config/rs6000/xilinx.h create mode 100644 gcc/config/rs6000/xilinx.opt (limited to 'gcc/config/rs6000') diff --git a/gcc/config/rs6000/40x.md b/gcc/config/rs6000/40x.md new file mode 100644 index 000000000..eaf1222ec --- /dev/null +++ b/gcc/config/rs6000/40x.md @@ -0,0 +1,120 @@ +;; Scheduling description for IBM PowerPC 403 and PowerPC 405 processors. +;; Copyright (C) 2003, 2004, 2007, 2009 Free Software Foundation, Inc. +;; +;; This file is part of GCC. + +;; GCC is free software; you can redistribute it and/or modify it +;; under the terms of the GNU General Public License as published +;; by the Free Software Foundation; either version 3, or (at your +;; option) any later version. 
+ +;; GCC is distributed in the hope that it will be useful, but WITHOUT +;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +;; License for more details. + +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . + +(define_automaton "ppc40x,ppc40xiu") +(define_cpu_unit "bpu_40x,fpu_405" "ppc40x") +(define_cpu_unit "iu_40x" "ppc40xiu") + +;; PPC401 / PPC403 / PPC405 32-bit integer only IU BPU +;; Embedded PowerPC controller +;; In-order execution +;; Max issue two insns/cycle (includes one branch) +(define_insn_reservation "ppc403-load" 2 + (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\ + load_l,store_c,sync") + (eq_attr "cpu" "ppc403,ppc405")) + "iu_40x") + +(define_insn_reservation "ppc403-store" 2 + (and (eq_attr "type" "store,store_ux,store_u") + (eq_attr "cpu" "ppc403,ppc405")) + "iu_40x") + +(define_insn_reservation "ppc403-integer" 1 + (and (eq_attr "type" "integer,insert_word,insert_dword,shift,trap,\ + var_shift_rotate,cntlz,exts,isel") + (eq_attr "cpu" "ppc403,ppc405")) + "iu_40x") + +(define_insn_reservation "ppc403-two" 1 + (and (eq_attr "type" "two") + (eq_attr "cpu" "ppc403,ppc405")) + "iu_40x,iu_40x") + +(define_insn_reservation "ppc403-three" 1 + (and (eq_attr "type" "three") + (eq_attr "cpu" "ppc403,ppc405")) + "iu_40x,iu_40x,iu_40x") + +(define_insn_reservation "ppc403-compare" 3 + (and (eq_attr "type" "cmp,fast_compare,compare,delayed_compare,\ + var_delayed_compare") + (eq_attr "cpu" "ppc403,ppc405")) + "iu_40x,nothing,bpu_40x") + +(define_insn_reservation "ppc403-imul" 4 + (and (eq_attr "type" "imul,imul2,imul3,imul_compare") + (eq_attr "cpu" "ppc403")) + "iu_40x*4") + +(define_insn_reservation "ppc405-imul" 5 + (and (eq_attr "type" "imul,imul_compare") + (eq_attr "cpu" "ppc405")) + "iu_40x*4") + +(define_insn_reservation "ppc405-imul2" 3 + (and (eq_attr 
"type" "imul2") + (eq_attr "cpu" "ppc405")) + "iu_40x*2") + +(define_insn_reservation "ppc405-imul3" 2 + (and (eq_attr "type" "imul3") + (eq_attr "cpu" "ppc405")) + "iu_40x") + +(define_insn_reservation "ppc403-idiv" 33 + (and (eq_attr "type" "idiv") + (eq_attr "cpu" "ppc403,ppc405")) + "iu_40x*33") + +(define_insn_reservation "ppc403-mfcr" 2 + (and (eq_attr "type" "mfcr") + (eq_attr "cpu" "ppc403,ppc405")) + "iu_40x") + +(define_insn_reservation "ppc403-mtcr" 3 + (and (eq_attr "type" "mtcr") + (eq_attr "cpu" "ppc403,ppc405")) + "iu_40x") + +(define_insn_reservation "ppc403-mtjmpr" 4 + (and (eq_attr "type" "mtjmpr") + (eq_attr "cpu" "ppc403,ppc405")) + "iu_40x") + +(define_insn_reservation "ppc403-mfjmpr" 2 + (and (eq_attr "type" "mfjmpr") + (eq_attr "cpu" "ppc403,ppc405")) + "iu_40x") + +(define_insn_reservation "ppc403-jmpreg" 1 + (and (eq_attr "type" "jmpreg,branch,isync") + (eq_attr "cpu" "ppc403,ppc405")) + "bpu_40x") + +(define_insn_reservation "ppc403-cr" 2 + (and (eq_attr "type" "cr_logical,delayed_cr") + (eq_attr "cpu" "ppc403,ppc405")) + "bpu_40x") + +(define_insn_reservation "ppc405-float" 11 + (and (eq_attr "type" "fpload,fpload_ux,fpload_u,fpstore,fpstore_ux,fpstore_u,\ + fpcompare,fp,dmul,sdiv,ddiv") + (eq_attr "cpu" "ppc405")) + "fpu_405*10") diff --git a/gcc/config/rs6000/440.md b/gcc/config/rs6000/440.md new file mode 100644 index 000000000..b329e7897 --- /dev/null +++ b/gcc/config/rs6000/440.md @@ -0,0 +1,133 @@ +;; Scheduling description for IBM PowerPC 440 processor. +;; Copyright (C) 2003, 2004, 2007, 2009 Free Software Foundation, Inc. +;; +;; This file is part of GCC. +;; +;; GCC is free software; you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation; either version 3, or (at your option) +;; any later version. 
+;; +;; GCC is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . + +;; PPC440 Embedded PowerPC controller +;; dual issue +;; i_pipe - complex integer / compare / branch +;; j_pipe - simple integer arithmetic +;; l_pipe - load-store +;; f_pipe - floating point arithmetic + +(define_automaton "ppc440_core,ppc440_apu") +(define_cpu_unit "ppc440_i_pipe,ppc440_j_pipe,ppc440_l_pipe" "ppc440_core") +(define_cpu_unit "ppc440_f_pipe" "ppc440_apu") +(define_cpu_unit "ppc440_issue_0,ppc440_issue_1" "ppc440_core") + +(define_reservation "ppc440_issue" "ppc440_issue_0|ppc440_issue_1") + + +(define_insn_reservation "ppc440-load" 3 + (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\ + load_l,store_c,sync") + (eq_attr "cpu" "ppc440")) + "ppc440_issue,ppc440_l_pipe") + +(define_insn_reservation "ppc440-store" 3 + (and (eq_attr "type" "store,store_ux,store_u") + (eq_attr "cpu" "ppc440")) + "ppc440_issue,ppc440_l_pipe") + +(define_insn_reservation "ppc440-fpload" 4 + (and (eq_attr "type" "fpload,fpload_ux,fpload_u") + (eq_attr "cpu" "ppc440")) + "ppc440_issue,ppc440_l_pipe") + +(define_insn_reservation "ppc440-fpstore" 3 + (and (eq_attr "type" "fpstore,fpstore_ux,fpstore_u") + (eq_attr "cpu" "ppc440")) + "ppc440_issue,ppc440_l_pipe") + +(define_insn_reservation "ppc440-integer" 1 + (and (eq_attr "type" "integer,insert_word,insert_dword,shift,\ + trap,var_shift_rotate,cntlz,exts,isel") + (eq_attr "cpu" "ppc440")) + "ppc440_issue,ppc440_i_pipe|ppc440_j_pipe") + +(define_insn_reservation "ppc440-two" 1 + (and (eq_attr "type" "two") + (eq_attr "cpu" "ppc440")) + "ppc440_issue_0+ppc440_issue_1,\ + ppc440_i_pipe|ppc440_j_pipe,ppc440_i_pipe|ppc440_j_pipe") + 
+(define_insn_reservation "ppc440-three" 1 + (and (eq_attr "type" "three") + (eq_attr "cpu" "ppc440")) + "ppc440_issue_0+ppc440_issue_1,ppc440_i_pipe|ppc440_j_pipe,\ + ppc440_i_pipe|ppc440_j_pipe,ppc440_i_pipe|ppc440_j_pipe") + +(define_insn_reservation "ppc440-imul" 3 + (and (eq_attr "type" "imul,imul_compare") + (eq_attr "cpu" "ppc440")) + "ppc440_issue,ppc440_i_pipe") + +(define_insn_reservation "ppc440-imul2" 2 + (and (eq_attr "type" "imul2,imul3") + (eq_attr "cpu" "ppc440")) + "ppc440_issue,ppc440_i_pipe") + +(define_insn_reservation "ppc440-idiv" 34 + (and (eq_attr "type" "idiv") + (eq_attr "cpu" "ppc440")) + "ppc440_issue,ppc440_i_pipe*33") + +(define_insn_reservation "ppc440-branch" 1 + (and (eq_attr "type" "branch,jmpreg,isync") + (eq_attr "cpu" "ppc440")) + "ppc440_issue,ppc440_i_pipe") + +(define_insn_reservation "ppc440-compare" 2 + (and (eq_attr "type" "cmp,fast_compare,compare,cr_logical,delayed_cr,mfcr") + (eq_attr "cpu" "ppc440")) + "ppc440_issue,ppc440_i_pipe") + +(define_insn_reservation "ppc440-fpcompare" 3 ; 2 + (and (eq_attr "type" "fpcompare") + (eq_attr "cpu" "ppc440")) + "ppc440_issue,ppc440_f_pipe+ppc440_i_pipe") + +(define_insn_reservation "ppc440-fp" 5 + (and (eq_attr "type" "fp,dmul") + (eq_attr "cpu" "ppc440")) + "ppc440_issue,ppc440_f_pipe") + +(define_insn_reservation "ppc440-sdiv" 19 + (and (eq_attr "type" "sdiv") + (eq_attr "cpu" "ppc440")) + "ppc440_issue,ppc440_f_pipe*15") + +(define_insn_reservation "ppc440-ddiv" 33 + (and (eq_attr "type" "ddiv") + (eq_attr "cpu" "ppc440")) + "ppc440_issue,ppc440_f_pipe*29") + +(define_insn_reservation "ppc440-mtcr" 3 + (and (eq_attr "type" "mtcr") + (eq_attr "cpu" "ppc440")) + "ppc440_issue,ppc440_i_pipe") + +(define_insn_reservation "ppc440-mtjmpr" 4 + (and (eq_attr "type" "mtjmpr") + (eq_attr "cpu" "ppc440")) + "ppc440_issue,ppc440_i_pipe") + +(define_insn_reservation "ppc440-mfjmpr" 2 + (and (eq_attr "type" "mfjmpr") + (eq_attr "cpu" "ppc440")) + "ppc440_issue,ppc440_i_pipe") + diff --git 
a/gcc/config/rs6000/476.md b/gcc/config/rs6000/476.md new file mode 100644 index 000000000..3f50bafa0 --- /dev/null +++ b/gcc/config/rs6000/476.md @@ -0,0 +1,142 @@ +;; Scheduling description for IBM PowerPC 476 processor. +;; Copyright (C) 2009 +;; Free Software Foundation, Inc. +;; Contributed by Peter Bergner (bergner@vnet.ibm.com). +;; +;; This file is part of GCC. +;; +;; GCC is free software; you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation; either version 3, or (at your option) +;; any later version. +;; +;; GCC is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . + +;; PPC476 Embedded PowerPC controller +;; 3 issue (476) / 4 issue (476fp) +;; +;; i_pipe - complex integer / compare +;; lj_pipe - load-store / simple integer arithmetic +;; b_pipe - branch pipe +;; f_pipe - floating point arithmetic + +(define_automaton "ppc476_core,ppc476_apu") + +(define_cpu_unit "ppc476_i_pipe,ppc476_lj_pipe,ppc476_b_pipe" "ppc476_core") +(define_cpu_unit "ppc476_issue_fp,ppc476_f_pipe" "ppc476_apu") +(define_cpu_unit "ppc476_issue_0,ppc476_issue_1,ppc476_issue_2" "ppc476_core") + +(define_reservation "ppc476_issue" "ppc476_issue_0|ppc476_issue_1|ppc476_issue_2") +(define_reservation "ppc476_issue2" "ppc476_issue_0+ppc476_issue_1\ + |ppc476_issue_0+ppc476_issue_2\ + |ppc476_issue_1+ppc476_issue_2") +(define_reservation "ppc476_issue3" "ppc476_issue_0+ppc476_issue_1+ppc476_issue_2") + +(define_insn_reservation "ppc476-load" 4 + (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\ + load_l,store_c,sync") + (eq_attr "cpu" "ppc476")) + "ppc476_issue,\ + 
ppc476_lj_pipe") + +(define_insn_reservation "ppc476-store" 4 + (and (eq_attr "type" "store,store_ux,store_u") + (eq_attr "cpu" "ppc476")) + "ppc476_issue,\ + ppc476_lj_pipe") + +(define_insn_reservation "ppc476-fpload" 4 + (and (eq_attr "type" "fpload,fpload_ux,fpload_u") + (eq_attr "cpu" "ppc476")) + "ppc476_issue,\ + ppc476_lj_pipe") + +(define_insn_reservation "ppc476-fpstore" 4 + (and (eq_attr "type" "fpstore,fpstore_ux,fpstore_u") + (eq_attr "cpu" "ppc476")) + "ppc476_issue,\ + ppc476_lj_pipe") + +(define_insn_reservation "ppc476-simple-integer" 1 + (and (eq_attr "type" "integer,insert_word,var_shift_rotate,exts,shift") + (eq_attr "cpu" "ppc476")) + "ppc476_issue,\ + ppc476_i_pipe|ppc476_lj_pipe") + +(define_insn_reservation "ppc476-complex-integer" 1 + (and (eq_attr "type" "cmp,cr_logical,delayed_cr,cntlz,isel,isync,sync,trap") + (eq_attr "cpu" "ppc476")) + "ppc476_issue,\ + ppc476_i_pipe") + +(define_insn_reservation "ppc476-compare" 4 + (and (eq_attr "type" "compare,delayed_compare,fast_compare,mfcr,mfcrf,\ + mtcr,mfjmpr,mtjmpr,var_delayed_compare") + (eq_attr "cpu" "ppc476")) + "ppc476_issue,\ + ppc476_i_pipe") + +(define_insn_reservation "ppc476-imul" 4 + (and (eq_attr "type" "imul,imul_compare,imul2,imul3") + (eq_attr "cpu" "ppc476")) + "ppc476_issue,\ + ppc476_i_pipe") + +(define_insn_reservation "ppc476-idiv" 11 + (and (eq_attr "type" "idiv") + (eq_attr "cpu" "ppc476")) + "ppc476_issue,\ + ppc476_i_pipe*11") + +(define_insn_reservation "ppc476-branch" 1 + (and (eq_attr "type" "branch,jmpreg") + (eq_attr "cpu" "ppc476")) + "ppc476_issue,\ + ppc476_b_pipe") + +(define_insn_reservation "ppc476-two" 2 + (and (eq_attr "type" "two") + (eq_attr "cpu" "ppc476")) + "ppc476_issue2,\ + ppc476_i_pipe|ppc476_lj_pipe,\ + ppc476_i_pipe|ppc476_lj_pipe") + +(define_insn_reservation "ppc476-three" 3 + (and (eq_attr "type" "three") + (eq_attr "cpu" "ppc476")) + "ppc476_issue3,\ + ppc476_i_pipe|ppc476_lj_pipe,\ + ppc476_i_pipe|ppc476_lj_pipe,\ + 
ppc476_i_pipe|ppc476_lj_pipe") + +(define_insn_reservation "ppc476-fpcompare" 6 + (and (eq_attr "type" "fpcompare") + (eq_attr "cpu" "ppc476")) + "ppc476_issue+ppc476_issue_fp,\ + ppc476_f_pipe+ppc476_i_pipe") + +(define_insn_reservation "ppc476-fp" 6 + (and (eq_attr "type" "fp,dmul") + (eq_attr "cpu" "ppc476")) + "ppc476_issue_fp,\ + ppc476_f_pipe") + +(define_insn_reservation "ppc476-sdiv" 19 + (and (eq_attr "type" "sdiv") + (eq_attr "cpu" "ppc476")) + "ppc476_issue_fp, + ppc476_f_pipe*19") + +(define_insn_reservation "ppc476-ddiv" 33 + (and (eq_attr "type" "ddiv") + (eq_attr "cpu" "ppc476")) + "ppc476_issue_fp,\ + ppc476_f_pipe*33") + diff --git a/gcc/config/rs6000/603.md b/gcc/config/rs6000/603.md new file mode 100644 index 000000000..a042729a1 --- /dev/null +++ b/gcc/config/rs6000/603.md @@ -0,0 +1,143 @@ +;; Scheduling description for PowerPC 603 processor. +;; Copyright (C) 2003, 2004, 2007, 2009 Free Software Foundation, Inc. +;; +;; This file is part of GCC. + +;; GCC is free software; you can redistribute it and/or modify it +;; under the terms of the GNU General Public License as published +;; by the Free Software Foundation; either version 3, or (at your +;; option) any later version. + +;; GCC is distributed in the hope that it will be useful, but WITHOUT +;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +;; License for more details. + +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . + +(define_automaton "ppc603,ppc603fp") +(define_cpu_unit "iu_603" "ppc603") +(define_cpu_unit "fpu_603" "ppc603fp") +(define_cpu_unit "lsu_603,bpu_603,sru_603" "ppc603") + +;; PPC603/PPC603e 32-bit IU, LSU, FPU, BPU, SRU +;; Max issue 3 insns/clock cycle (includes 1 branch) + +;; Branches go straight to the BPU. All other insns are handled +;; by a dispatch unit which can issue a max of 2 insns per cycle. 
+ +;; The PPC603e user's manual recommends that to reduce branch mispredictions, +;; the insn that sets CR bits should be separated from the branch insn +;; that evaluates them; separation by more than 9 insns ensures that the CR +;; bits will be immediately available for execution. +;; This could be artificially achieved by exaggerating the latency of +;; compare insns but at the expense of a poorer schedule. + +;; CR insns get executed in the SRU. Not modelled. + +(define_insn_reservation "ppc603-load" 2 + (and (eq_attr "type" "load,load_ext,load_ux,load_u,load_l") + (eq_attr "cpu" "ppc603")) + "lsu_603") + +(define_insn_reservation "ppc603-store" 2 + (and (eq_attr "type" "store,store_ux,store_u,fpstore,fpstore_ux,fpstore_u") + (eq_attr "cpu" "ppc603")) + "lsu_603*2") + +(define_insn_reservation "ppc603-fpload" 2 + (and (eq_attr "type" "fpload,fpload_ux,fpload_u") + (eq_attr "cpu" "ppc603")) + "lsu_603") + +(define_insn_reservation "ppc603-storec" 8 + (and (eq_attr "type" "store_c") + (eq_attr "cpu" "ppc603")) + "lsu_603") + +(define_insn_reservation "ppc603-integer" 1 + (and (eq_attr "type" "integer,insert_word,insert_dword,shift,trap,\ + var_shift_rotate,cntlz,exts,isel") + (eq_attr "cpu" "ppc603")) + "iu_603") + +(define_insn_reservation "ppc603-two" 1 + (and (eq_attr "type" "two") + (eq_attr "cpu" "ppc603")) + "iu_603,iu_603") + +(define_insn_reservation "ppc603-three" 1 + (and (eq_attr "type" "three") + (eq_attr "cpu" "ppc603")) + "iu_603,iu_603,iu_603") + +; This takes 2 or 3 cycles +(define_insn_reservation "ppc603-imul" 3 + (and (eq_attr "type" "imul,imul_compare") + (eq_attr "cpu" "ppc603")) + "iu_603*2") + +(define_insn_reservation "ppc603-imul2" 2 + (and (eq_attr "type" "imul2,imul3") + (eq_attr "cpu" "ppc603")) + "iu_603*2") + +(define_insn_reservation "ppc603-idiv" 37 + (and (eq_attr "type" "idiv") + (eq_attr "cpu" "ppc603")) + "iu_603*37") + +(define_insn_reservation "ppc603-compare" 3 + (and (eq_attr "type" 
"cmp,fast_compare,compare,delayed_compare,\ + var_delayed_compare") + (eq_attr "cpu" "ppc603")) + "iu_603,nothing,bpu_603") + +(define_insn_reservation "ppc603-fpcompare" 3 + (and (eq_attr "type" "fpcompare") + (eq_attr "cpu" "ppc603")) + "(fpu_603+iu_603*2),bpu_603") + +(define_insn_reservation "ppc603-fp" 3 + (and (eq_attr "type" "fp") + (eq_attr "cpu" "ppc603")) + "fpu_603") + +(define_insn_reservation "ppc603-dmul" 4 + (and (eq_attr "type" "dmul") + (eq_attr "cpu" "ppc603")) + "fpu_603*2") + +; Divides are not pipelined +(define_insn_reservation "ppc603-sdiv" 18 + (and (eq_attr "type" "sdiv") + (eq_attr "cpu" "ppc603")) + "fpu_603*18") + +(define_insn_reservation "ppc603-ddiv" 33 + (and (eq_attr "type" "ddiv") + (eq_attr "cpu" "ppc603")) + "fpu_603*33") + +(define_insn_reservation "ppc603-crlogical" 2 + (and (eq_attr "type" "cr_logical,delayed_cr,mfcr,mtcr") + (eq_attr "cpu" "ppc603")) + "sru_603") + +(define_insn_reservation "ppc603-mtjmpr" 4 + (and (eq_attr "type" "mtjmpr") + (eq_attr "cpu" "ppc603")) + "sru_603") + +(define_insn_reservation "ppc603-mfjmpr" 2 + (and (eq_attr "type" "mfjmpr,isync,sync") + (eq_attr "cpu" "ppc603")) + "sru_603") + +(define_insn_reservation "ppc603-jmpreg" 1 + (and (eq_attr "type" "jmpreg,branch") + (eq_attr "cpu" "ppc603")) + "bpu_603") + diff --git a/gcc/config/rs6000/6xx.md b/gcc/config/rs6000/6xx.md new file mode 100644 index 000000000..b0de97315 --- /dev/null +++ b/gcc/config/rs6000/6xx.md @@ -0,0 +1,275 @@ +;; Scheduling description for PowerPC 604, PowerPC 604e, PowerPC 620, +;; and PowerPC 630 processors. +;; Copyright (C) 2003, 2004, 2007, 2009 Free Software Foundation, Inc. +;; +;; This file is part of GCC. + +;; GCC is free software; you can redistribute it and/or modify it +;; under the terms of the GNU General Public License as published +;; by the Free Software Foundation; either version 3, or (at your +;; option) any later version. 
+ +;; GCC is distributed in the hope that it will be useful, but WITHOUT +;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +;; License for more details. + +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . + +(define_automaton "ppc6xx,ppc6xxfp,ppc6xxfp2") +(define_cpu_unit "iu1_6xx,iu2_6xx,mciu_6xx" "ppc6xx") +(define_cpu_unit "fpu_6xx" "ppc6xxfp") +(define_cpu_unit "fpu1_6xx,fpu2_6xx" "ppc6xxfp2") +(define_cpu_unit "lsu_6xx,bpu_6xx,cru_6xx" "ppc6xx") + +;; PPC604 32-bit 2xSCIU, MCIU, LSU, FPU, BPU +;; PPC604e 32-bit 2xSCIU, MCIU, LSU, FPU, BPU, CRU +;; MCIU used for imul/idiv and moves from/to spr +;; LSU 2 stage pipelined +;; FPU 3 stage pipelined +;; Max issue 4 insns/clock cycle + +;; PPC604e is PPC604 with larger caches and a CRU. In the 604 +;; the CR logical operations are handled in the BPU. +;; In the 604e, the CRU shares bus with BPU so only one condition +;; register or branch insn can be issued per clock. Not modelled. + +;; PPC620 64-bit 2xSCIU, MCIU, LSU, FPU, BPU, CRU +;; PPC630 64-bit 2xSCIU, MCIU, LSU, 2xFPU, BPU, CRU +;; Max issue 4 insns/clock cycle +;; Out-of-order execution, in-order completion + +;; No following instruction can dispatch in the same cycle as a branch +;; instruction. Not modelled. This is no problem if RCSP is not +;; enabled since the scheduler stops a schedule when it gets to a branch. + +;; Four insns can be dispatched per cycle. 
+ +(define_insn_reservation "ppc604-load" 2 + (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u") + (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630")) + "lsu_6xx") + +(define_insn_reservation "ppc604-fpload" 3 + (and (eq_attr "type" "fpload,fpload_ux,fpload_u") + (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630")) + "lsu_6xx") + +(define_insn_reservation "ppc604-store" 3 + (and (eq_attr "type" "store,fpstore,store_ux,store_u,fpstore_ux,fpstore_u") + (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630")) + "lsu_6xx") + +(define_insn_reservation "ppc604-llsc" 3 + (and (eq_attr "type" "load_l,store_c") + (eq_attr "cpu" "ppc604,ppc604e")) + "lsu_6xx") + +(define_insn_reservation "ppc630-llsc" 4 + (and (eq_attr "type" "load_l,store_c") + (eq_attr "cpu" "ppc620,ppc630")) + "lsu_6xx") + +(define_insn_reservation "ppc604-integer" 1 + (and (eq_attr "type" "integer,insert_word,insert_dword,shift,trap,\ + var_shift_rotate,cntlz,exts,isel") + (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630")) + "iu1_6xx|iu2_6xx") + +(define_insn_reservation "ppc604-two" 1 + (and (eq_attr "type" "two") + (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630")) + "iu1_6xx|iu2_6xx,iu1_6xx|iu2_6xx") + +(define_insn_reservation "ppc604-three" 1 + (and (eq_attr "type" "three") + (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630")) + "iu1_6xx|iu2_6xx,iu1_6xx|iu2_6xx,iu1_6xx|iu2_6xx") + +(define_insn_reservation "ppc604-imul" 4 + (and (eq_attr "type" "imul,imul2,imul3,imul_compare") + (eq_attr "cpu" "ppc604")) + "mciu_6xx*2") + +(define_insn_reservation "ppc604e-imul" 2 + (and (eq_attr "type" "imul,imul2,imul3,imul_compare") + (eq_attr "cpu" "ppc604e")) + "mciu_6xx") + +(define_insn_reservation "ppc620-imul" 5 + (and (eq_attr "type" "imul,imul_compare") + (eq_attr "cpu" "ppc620,ppc630")) + "mciu_6xx*3") + +(define_insn_reservation "ppc620-imul2" 4 + (and (eq_attr "type" "imul2") + (eq_attr "cpu" "ppc620,ppc630")) + "mciu_6xx*3") + +(define_insn_reservation "ppc620-imul3" 3 + (and (eq_attr "type" "imul3") + 
(eq_attr "cpu" "ppc620,ppc630")) + "mciu_6xx*3") + +(define_insn_reservation "ppc620-lmul" 7 + (and (eq_attr "type" "lmul,lmul_compare") + (eq_attr "cpu" "ppc620,ppc630")) + "mciu_6xx*5") + +(define_insn_reservation "ppc604-idiv" 20 + (and (eq_attr "type" "idiv") + (eq_attr "cpu" "ppc604,ppc604e")) + "mciu_6xx*19") + +(define_insn_reservation "ppc620-idiv" 37 + (and (eq_attr "type" "idiv") + (eq_attr "cpu" "ppc620")) + "mciu_6xx*36") + +(define_insn_reservation "ppc630-idiv" 21 + (and (eq_attr "type" "idiv") + (eq_attr "cpu" "ppc630")) + "mciu_6xx*20") + +(define_insn_reservation "ppc620-ldiv" 37 + (and (eq_attr "type" "ldiv") + (eq_attr "cpu" "ppc620,ppc630")) + "mciu_6xx*36") + +(define_insn_reservation "ppc604-compare" 3 + (and (eq_attr "type" "cmp,fast_compare,compare,delayed_compare,\ + var_delayed_compare") + (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630")) + "(iu1_6xx|iu2_6xx)") + +; FPU PPC604{,e},PPC620 +(define_insn_reservation "ppc604-fpcompare" 5 + (and (eq_attr "type" "fpcompare") + (eq_attr "cpu" "ppc604,ppc604e,ppc620")) + "fpu_6xx") + +(define_insn_reservation "ppc604-fp" 3 + (and (eq_attr "type" "fp") + (eq_attr "cpu" "ppc604,ppc604e,ppc620")) + "fpu_6xx") + +(define_insn_reservation "ppc604-dmul" 3 + (and (eq_attr "type" "dmul") + (eq_attr "cpu" "ppc604,ppc604e,ppc620")) + "fpu_6xx") + +; Divides are not pipelined +(define_insn_reservation "ppc604-sdiv" 18 + (and (eq_attr "type" "sdiv") + (eq_attr "cpu" "ppc604,ppc604e,ppc620")) + "fpu_6xx*18") + +(define_insn_reservation "ppc604-ddiv" 32 + (and (eq_attr "type" "ddiv") + (eq_attr "cpu" "ppc604,ppc604e,ppc620")) + "fpu_6xx*32") + +(define_insn_reservation "ppc620-ssqrt" 31 + (and (eq_attr "type" "ssqrt") + (eq_attr "cpu" "ppc620")) + "fpu_6xx*31") + +(define_insn_reservation "ppc620-dsqrt" 31 + (and (eq_attr "type" "dsqrt") + (eq_attr "cpu" "ppc620")) + "fpu_6xx*31") + + +; 2xFPU PPC630 +(define_insn_reservation "ppc630-fpcompare" 5 + (and (eq_attr "type" "fpcompare") + (eq_attr "cpu" "ppc630")) + 
"fpu1_6xx|fpu2_6xx") + +(define_insn_reservation "ppc630-fp" 3 + (and (eq_attr "type" "fp,dmul") + (eq_attr "cpu" "ppc630")) + "fpu1_6xx|fpu2_6xx") + +(define_insn_reservation "ppc630-sdiv" 17 + (and (eq_attr "type" "sdiv") + (eq_attr "cpu" "ppc630")) + "fpu1_6xx*17|fpu2_6xx*17") + +(define_insn_reservation "ppc630-ddiv" 21 + (and (eq_attr "type" "ddiv") + (eq_attr "cpu" "ppc630")) + "fpu1_6xx*21|fpu2_6xx*21") + +(define_insn_reservation "ppc630-ssqrt" 18 + (and (eq_attr "type" "ssqrt") + (eq_attr "cpu" "ppc630")) + "fpu1_6xx*18|fpu2_6xx*18") + +(define_insn_reservation "ppc630-dsqrt" 25 + (and (eq_attr "type" "dsqrt") + (eq_attr "cpu" "ppc630")) + "fpu1_6xx*25|fpu2_6xx*25") + +(define_insn_reservation "ppc604-mfcr" 3 + (and (eq_attr "type" "mfcr") + (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630")) + "mciu_6xx") + +(define_insn_reservation "ppc604-mtcr" 2 + (and (eq_attr "type" "mtcr") + (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630")) + "iu1_6xx|iu2_6xx") + +(define_insn_reservation "ppc604-crlogical" 2 + (and (eq_attr "type" "cr_logical,delayed_cr") + (eq_attr "cpu" "ppc604")) + "bpu_6xx") + +(define_insn_reservation "ppc604e-crlogical" 2 + (and (eq_attr "type" "cr_logical,delayed_cr") + (eq_attr "cpu" "ppc604e,ppc620,ppc630")) + "cru_6xx") + +(define_insn_reservation "ppc604-mtjmpr" 2 + (and (eq_attr "type" "mtjmpr") + (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630")) + "mciu_6xx") + +(define_insn_reservation "ppc604-mfjmpr" 3 + (and (eq_attr "type" "mfjmpr") + (eq_attr "cpu" "ppc604,ppc604e,ppc620")) + "mciu_6xx") + +(define_insn_reservation "ppc630-mfjmpr" 2 + (and (eq_attr "type" "mfjmpr") + (eq_attr "cpu" "ppc630")) + "mciu_6xx") + +(define_insn_reservation "ppc604-jmpreg" 1 + (and (eq_attr "type" "jmpreg,branch") + (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630")) + "bpu_6xx") + +(define_insn_reservation "ppc604-isync" 0 + (and (eq_attr "type" "isync") + (eq_attr "cpu" "ppc604,ppc604e")) + "bpu_6xx") + +(define_insn_reservation "ppc630-isync" 6 + (and (eq_attr 
"type" "isync") + (eq_attr "cpu" "ppc620,ppc630")) + "bpu_6xx") + +(define_insn_reservation "ppc604-sync" 35 + (and (eq_attr "type" "sync") + (eq_attr "cpu" "ppc604,ppc604e")) + "lsu_6xx") + +(define_insn_reservation "ppc630-sync" 26 + (and (eq_attr "type" "sync") + (eq_attr "cpu" "ppc620,ppc630")) + "lsu_6xx") + diff --git a/gcc/config/rs6000/7450.md b/gcc/config/rs6000/7450.md new file mode 100644 index 000000000..ccaa3b20d --- /dev/null +++ b/gcc/config/rs6000/7450.md @@ -0,0 +1,185 @@ +;; Scheduling description for Motorola PowerPC 7450 processor. +;; Copyright (C) 2003, 2004, 2007, 2009 Free Software Foundation, Inc. +;; +;; This file is part of GCC. + +;; GCC is free software; you can redistribute it and/or modify it +;; under the terms of the GNU General Public License as published +;; by the Free Software Foundation; either version 3, or (at your +;; option) any later version. + +;; GCC is distributed in the hope that it will be useful, but WITHOUT +;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +;; License for more details. + +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . 
+ +(define_automaton "ppc7450,ppc7450mciu,ppc7450fp,ppc7450vec") +(define_cpu_unit "iu1_7450,iu2_7450,iu3_7450" "ppc7450") +(define_cpu_unit "mciu_7450" "ppc7450mciu") +(define_cpu_unit "fpu_7450" "ppc7450fp") +(define_cpu_unit "lsu_7450,bpu_7450" "ppc7450") +(define_cpu_unit "du1_7450,du2_7450,du3_7450" "ppc7450") +(define_cpu_unit "vecsmpl_7450,veccmplx_7450,vecflt_7450,vecperm_7450" "ppc7450vec") +(define_cpu_unit "vdu1_7450,vdu2_7450" "ppc7450vec") + + +;; PPC7450 32-bit 3xIU, MCIU, LSU, SRU, FPU, BPU, 4xVEC +;; IU1,IU2,IU3 can perform all integer operations +;; MCIU performs imul and idiv, cr logical, SPR moves +;; LSU 2 stage pipelined +;; FPU 3 stage pipelined +;; It also has 4 vector units, one for each type of vector instruction. +;; However, we can only dispatch 2 instructions per cycle. +;; Max issue 3 insns/clock cycle (includes 1 branch) +;; In-order execution + +;; Branches go straight to the BPU. All other insns are handled +;; by a dispatch unit which can issue a max of 3 insns per cycle. 
+(define_reservation "ppc7450_du" "du1_7450|du2_7450|du3_7450") +(define_reservation "ppc7450_vec_du" "vdu1_7450|vdu2_7450") + +(define_insn_reservation "ppc7450-load" 3 + (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,\ + load_ux,load_u,vecload") + (eq_attr "cpu" "ppc7450")) + "ppc7450_du,lsu_7450") + +(define_insn_reservation "ppc7450-store" 3 + (and (eq_attr "type" "store,store_ux,store_u,vecstore") + (eq_attr "cpu" "ppc7450")) + "ppc7450_du,lsu_7450") + +(define_insn_reservation "ppc7450-fpload" 4 + (and (eq_attr "type" "fpload,fpload_ux,fpload_u") + (eq_attr "cpu" "ppc7450")) + "ppc7450_du,lsu_7450") + +(define_insn_reservation "ppc7450-fpstore" 3 + (and (eq_attr "type" "fpstore,fpstore_ux,fpstore_u") + (eq_attr "cpu" "ppc7450")) + "ppc7450_du,lsu_7450*3") + +(define_insn_reservation "ppc7450-llsc" 3 + (and (eq_attr "type" "load_l,store_c") + (eq_attr "cpu" "ppc7450")) + "ppc7450_du,lsu_7450") + +(define_insn_reservation "ppc7450-sync" 35 + (and (eq_attr "type" "sync") + (eq_attr "cpu" "ppc7450")) + "ppc7450_du,lsu_7450") + +(define_insn_reservation "ppc7450-integer" 1 + (and (eq_attr "type" "integer,insert_word,insert_dword,shift,\ + trap,var_shift_rotate,cntlz,exts,isel") + (eq_attr "cpu" "ppc7450")) + "ppc7450_du,iu1_7450|iu2_7450|iu3_7450") + +(define_insn_reservation "ppc7450-two" 1 + (and (eq_attr "type" "two") + (eq_attr "cpu" "ppc7450")) + "ppc7450_du,iu1_7450|iu2_7450|iu3_7450,iu1_7450|iu2_7450|iu3_7450") + +(define_insn_reservation "ppc7450-three" 1 + (and (eq_attr "type" "three") + (eq_attr "cpu" "ppc7450")) + "ppc7450_du,iu1_7450|iu2_7450|iu3_7450,\ + iu1_7450|iu2_7450|iu3_7450,iu1_7450|iu2_7450|iu3_7450") + +(define_insn_reservation "ppc7450-imul" 4 + (and (eq_attr "type" "imul,imul_compare") + (eq_attr "cpu" "ppc7450")) + "ppc7450_du,mciu_7450*2") + +(define_insn_reservation "ppc7450-imul2" 3 + (and (eq_attr "type" "imul2,imul3") + (eq_attr "cpu" "ppc7450")) + "ppc7450_du,mciu_7450") + +(define_insn_reservation "ppc7450-idiv" 23 + 
(and (eq_attr "type" "idiv") + (eq_attr "cpu" "ppc7450")) + "ppc7450_du,mciu_7450*23") + +(define_insn_reservation "ppc7450-compare" 2 + (and (eq_attr "type" "cmp,fast_compare,compare,delayed_compare,\ + var_delayed_compare") + (eq_attr "cpu" "ppc7450")) + "ppc7450_du,(iu1_7450|iu2_7450|iu3_7450)") + +(define_insn_reservation "ppc7450-fpcompare" 5 + (and (eq_attr "type" "fpcompare") + (eq_attr "cpu" "ppc7450")) + "ppc7450_du,fpu_7450") + +(define_insn_reservation "ppc7450-fp" 5 + (and (eq_attr "type" "fp,dmul") + (eq_attr "cpu" "ppc7450")) + "ppc7450_du,fpu_7450") + +; Divides are not pipelined +(define_insn_reservation "ppc7450-sdiv" 21 + (and (eq_attr "type" "sdiv") + (eq_attr "cpu" "ppc7450")) + "ppc7450_du,fpu_7450*21") + +(define_insn_reservation "ppc7450-ddiv" 35 + (and (eq_attr "type" "ddiv") + (eq_attr "cpu" "ppc7450")) + "ppc7450_du,fpu_7450*35") + +(define_insn_reservation "ppc7450-mfcr" 2 + (and (eq_attr "type" "mfcr,mtcr") + (eq_attr "cpu" "ppc7450")) + "ppc7450_du,mciu_7450") + +(define_insn_reservation "ppc7450-crlogical" 1 + (and (eq_attr "type" "cr_logical,delayed_cr") + (eq_attr "cpu" "ppc7450")) + "ppc7450_du,mciu_7450") + +(define_insn_reservation "ppc7450-mtjmpr" 2 + (and (eq_attr "type" "mtjmpr") + (eq_attr "cpu" "ppc7450")) + "nothing,mciu_7450*2") + +(define_insn_reservation "ppc7450-mfjmpr" 3 + (and (eq_attr "type" "mfjmpr") + (eq_attr "cpu" "ppc7450")) + "nothing,mciu_7450*2") + +(define_insn_reservation "ppc7450-jmpreg" 1 + (and (eq_attr "type" "jmpreg,branch,isync") + (eq_attr "cpu" "ppc7450")) + "nothing,bpu_7450") + +;; Altivec +(define_insn_reservation "ppc7450-vecsimple" 1 + (and (eq_attr "type" "vecsimple") + (eq_attr "cpu" "ppc7450")) + "ppc7450_du,ppc7450_vec_du,vecsmpl_7450") + +(define_insn_reservation "ppc7450-veccomplex" 4 + (and (eq_attr "type" "veccomplex") + (eq_attr "cpu" "ppc7450")) + "ppc7450_du,ppc7450_vec_du,veccmplx_7450") + +(define_insn_reservation "ppc7450-veccmp" 2 + (and (eq_attr "type" "veccmp") + (eq_attr "cpu" 
"ppc7450")) + "ppc7450_du,ppc7450_vec_du,veccmplx_7450") + +(define_insn_reservation "ppc7450-vecfloat" 4 + (and (eq_attr "type" "vecfloat") + (eq_attr "cpu" "ppc7450")) + "ppc7450_du,ppc7450_vec_du,vecflt_7450") + +(define_insn_reservation "ppc7450-vecperm" 2 + (and (eq_attr "type" "vecperm") + (eq_attr "cpu" "ppc7450")) + "ppc7450_du,ppc7450_vec_du,vecperm_7450") + diff --git a/gcc/config/rs6000/750cl.h b/gcc/config/rs6000/750cl.h new file mode 100644 index 000000000..0fa169845 --- /dev/null +++ b/gcc/config/rs6000/750cl.h @@ -0,0 +1,30 @@ +/* Enable 750cl paired single support. + Copyright (C) 2007, 2009 Free Software Foundation, Inc. + Contributed by Revital Eres (eres@il.ibm.com) + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#undef TARGET_PAIRED_FLOAT +#define TARGET_PAIRED_FLOAT rs6000_paired_float + +#undef ASM_CPU_SPEC +#define ASM_CPU_SPEC "-m750cl" + diff --git a/gcc/config/rs6000/7xx.md b/gcc/config/rs6000/7xx.md new file mode 100644 index 000000000..edbde75c2 --- /dev/null +++ b/gcc/config/rs6000/7xx.md @@ -0,0 +1,184 @@ +;; Scheduling description for Motorola PowerPC 750 and PowerPC 7400 processors. 
+;; Copyright (C) 2003, 2004, 2007, 2009 Free Software Foundation, Inc. +;; +;; This file is part of GCC. + +;; GCC is free software; you can redistribute it and/or modify it +;; under the terms of the GNU General Public License as published +;; by the Free Software Foundation; either version 3, or (at your +;; option) any later version. + +;; GCC is distributed in the hope that it will be useful, but WITHOUT +;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +;; License for more details. + +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . + +(define_automaton "ppc7xx,ppc7xxfp") +(define_cpu_unit "iu1_7xx,iu2_7xx" "ppc7xx") +(define_cpu_unit "fpu_7xx" "ppc7xxfp") +(define_cpu_unit "lsu_7xx,bpu_7xx,sru_7xx" "ppc7xx") +(define_cpu_unit "du1_7xx,du2_7xx" "ppc7xx") +(define_cpu_unit "veccmplx_7xx,vecperm_7xx,vdu_7xx" "ppc7xx") + +;; PPC740/PPC750/PPC7400 32-bit 2xIU, LSU, SRU, FPU, BPU +;; IU1 can perform all integer operations +;; IU2 can perform all integer operations except imul and idiv +;; LSU 2 stage pipelined +;; FPU 3 stage pipelined +;; Max issue 3 insns/clock cycle (includes 1 branch) +;; In-order execution + + +;; The PPC750 user's manual recommends that to reduce branch mispredictions, +;; the insn that sets CR bits should be separated from the branch insn +;; that evaluates them. There is no advantage have more than 10 cycles +;; of separation. +;; This could be artificially achieved by exaggerating the latency of +;; compare insns but at the expense of a poorer schedule. + +;; Branches go straight to the BPU. All other insns are handled +;; by a dispatch unit which can issue a max of 2 insns per cycle. 
+(define_reservation "ppc750_du" "du1_7xx|du2_7xx") +(define_reservation "ppc7400_vec_du" "vdu_7xx") + +(define_insn_reservation "ppc750-load" 2 + (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,\ + load_ux,load_u,fpload,fpload_ux,fpload_u,\ + vecload,load_l") + (eq_attr "cpu" "ppc750,ppc7400")) + "ppc750_du,lsu_7xx") + +(define_insn_reservation "ppc750-store" 2 + (and (eq_attr "type" "store,store_ux,store_u,\ + fpstore,fpstore_ux,fpstore_u,vecstore") + (eq_attr "cpu" "ppc750,ppc7400")) + "ppc750_du,lsu_7xx") + +(define_insn_reservation "ppc750-storec" 8 + (and (eq_attr "type" "store_c") + (eq_attr "cpu" "ppc750,ppc7400")) + "ppc750_du,lsu_7xx") + +(define_insn_reservation "ppc750-integer" 1 + (and (eq_attr "type" "integer,insert_word,insert_dword,shift,\ + trap,var_shift_rotate,cntlz,exts,isel") + (eq_attr "cpu" "ppc750,ppc7400")) + "ppc750_du,iu1_7xx|iu2_7xx") + +(define_insn_reservation "ppc750-two" 1 + (and (eq_attr "type" "two") + (eq_attr "cpu" "ppc750,ppc7400")) + "ppc750_du,iu1_7xx|iu2_7xx,iu1_7xx|iu2_7xx") + +(define_insn_reservation "ppc750-three" 1 + (and (eq_attr "type" "three") + (eq_attr "cpu" "ppc750,ppc7400")) + "ppc750_du,iu1_7xx|iu2_7xx,iu1_7xx|iu2_7xx,iu1_7xx|iu2_7xx") + +(define_insn_reservation "ppc750-imul" 4 + (and (eq_attr "type" "imul,imul_compare") + (eq_attr "cpu" "ppc750,ppc7400")) + "ppc750_du,iu1_7xx*4") + +(define_insn_reservation "ppc750-imul2" 3 + (and (eq_attr "type" "imul2") + (eq_attr "cpu" "ppc750,ppc7400")) + "ppc750_du,iu1_7xx*2") + +(define_insn_reservation "ppc750-imul3" 2 + (and (eq_attr "type" "imul3") + (eq_attr "cpu" "ppc750,ppc7400")) + "ppc750_du,iu1_7xx") + +(define_insn_reservation "ppc750-idiv" 19 + (and (eq_attr "type" "idiv") + (eq_attr "cpu" "ppc750,ppc7400")) + "ppc750_du,iu1_7xx*19") + +(define_insn_reservation "ppc750-compare" 2 + (and (eq_attr "type" "cmp,fast_compare,compare,delayed_compare,\ + var_delayed_compare") + (eq_attr "cpu" "ppc750,ppc7400")) + "ppc750_du,(iu1_7xx|iu2_7xx)") + 
+(define_insn_reservation "ppc750-fpcompare" 2 + (and (eq_attr "type" "fpcompare") + (eq_attr "cpu" "ppc750,ppc7400")) + "ppc750_du,fpu_7xx") + +(define_insn_reservation "ppc750-fp" 3 + (and (eq_attr "type" "fp") + (eq_attr "cpu" "ppc750,ppc7400")) + "ppc750_du,fpu_7xx") + +(define_insn_reservation "ppc750-dmul" 4 + (and (eq_attr "type" "dmul") + (eq_attr "cpu" "ppc750")) + "ppc750_du,fpu_7xx*2") + +(define_insn_reservation "ppc7400-dmul" 3 + (and (eq_attr "type" "dmul") + (eq_attr "cpu" "ppc7400")) + "ppc750_du,fpu_7xx") + +; Divides are not pipelined +(define_insn_reservation "ppc750-sdiv" 17 + (and (eq_attr "type" "sdiv") + (eq_attr "cpu" "ppc750,ppc7400")) + "ppc750_du,fpu_7xx*17") + +(define_insn_reservation "ppc750-ddiv" 31 + (and (eq_attr "type" "ddiv") + (eq_attr "cpu" "ppc750,ppc7400")) + "ppc750_du,fpu_7xx*31") + +(define_insn_reservation "ppc750-mfcr" 2 + (and (eq_attr "type" "mfcr,mtcr") + (eq_attr "cpu" "ppc750,ppc7400")) + "ppc750_du,iu1_7xx") + +(define_insn_reservation "ppc750-crlogical" 3 + (and (eq_attr "type" "cr_logical,delayed_cr") + (eq_attr "cpu" "ppc750,ppc7400")) + "nothing,sru_7xx*2") + +(define_insn_reservation "ppc750-mtjmpr" 2 + (and (eq_attr "type" "mtjmpr,isync,sync") + (eq_attr "cpu" "ppc750,ppc7400")) + "nothing,sru_7xx*2") + +(define_insn_reservation "ppc750-mfjmpr" 3 + (and (eq_attr "type" "mfjmpr") + (eq_attr "cpu" "ppc750,ppc7400")) + "nothing,sru_7xx*2") + +(define_insn_reservation "ppc750-jmpreg" 1 + (and (eq_attr "type" "jmpreg,branch,isync") + (eq_attr "cpu" "ppc750,ppc7400")) + "nothing,bpu_7xx") + +;; Altivec +(define_insn_reservation "ppc7400-vecsimple" 1 + (and (eq_attr "type" "vecsimple,veccmp") + (eq_attr "cpu" "ppc7400")) + "ppc750_du,ppc7400_vec_du,veccmplx_7xx") + +(define_insn_reservation "ppc7400-veccomplex" 4 + (and (eq_attr "type" "veccomplex") + (eq_attr "cpu" "ppc7400")) + "ppc750_du,ppc7400_vec_du,veccmplx_7xx") + +(define_insn_reservation "ppc7400-vecfloat" 4 + (and (eq_attr "type" "vecfloat") + (eq_attr 
"cpu" "ppc7400")) + "ppc750_du,ppc7400_vec_du,veccmplx_7xx") + +(define_insn_reservation "ppc7400-vecperm" 2 + (and (eq_attr "type" "vecperm") + (eq_attr "cpu" "ppc7400")) + "ppc750_du,ppc7400_vec_du,vecperm_7xx") + diff --git a/gcc/config/rs6000/8540.md b/gcc/config/rs6000/8540.md new file mode 100644 index 000000000..4096dff43 --- /dev/null +++ b/gcc/config/rs6000/8540.md @@ -0,0 +1,250 @@ +;; Pipeline description for Motorola PowerPC 8540 processor. +;; Copyright (C) 2003, 2004, 2007, 2009 Free Software Foundation, Inc. +;; +;; This file is part of GCC. + +;; GCC is free software; you can redistribute it and/or modify it +;; under the terms of the GNU General Public License as published +;; by the Free Software Foundation; either version 3, or (at your +;; option) any later version. + +;; GCC is distributed in the hope that it will be useful, but WITHOUT +;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +;; License for more details. + +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . + +(define_automaton "ppc8540_most,ppc8540_long,ppc8540_retire") +(define_cpu_unit "ppc8540_decode_0,ppc8540_decode_1" "ppc8540_most") + +;; We don't simulate general issue queue (GIC). If we have SU insn +;; and then SU1 insn, they cannot be issued on the same cycle +;; (although SU1 insn and then SU insn can be issued) because the SU +;; insn will go to SU1 from GIC0 entry. Fortunately, the first cycle +;; multipass insn scheduling will find the situation and issue the SU1 +;; insn and then the SU insn. 
+(define_cpu_unit "ppc8540_issue_0,ppc8540_issue_1" "ppc8540_most") + +;; We could describe completion buffers slots in combination with the +;; retirement units and the order of completion but the result +;; automaton would behave in the same way because we cannot describe +;; real latency time with taking in order completion into account. +;; Actually we could define the real latency time by querying reserved +;; automaton units but the current scheduler uses latency time before +;; issuing insns and making any reservations. +;; +;; So our description is aimed to achieve a insn schedule in which the +;; insns would not wait in the completion buffer. +(define_cpu_unit "ppc8540_retire_0,ppc8540_retire_1" "ppc8540_retire") + +;; Branch unit: +(define_cpu_unit "ppc8540_bu" "ppc8540_most") + +;; SU: +(define_cpu_unit "ppc8540_su0_stage0,ppc8540_su1_stage0" "ppc8540_most") + +;; We could describe here MU subunits for float multiply, float add +;; etc. But the result automaton would behave the same way as the +;; described one pipeline below because MU can start only one insn +;; per cycle. Actually we could simplify the automaton more not +;; describing stages 1-3, the result automata would be the same. +(define_cpu_unit "ppc8540_mu_stage0,ppc8540_mu_stage1" "ppc8540_most") +(define_cpu_unit "ppc8540_mu_stage2,ppc8540_mu_stage3" "ppc8540_most") + +;; The following unit is used to describe non-pipelined division. +(define_cpu_unit "ppc8540_mu_div" "ppc8540_long") + +;; Here we simplified LSU unit description not describing the stages. +(define_cpu_unit "ppc8540_lsu" "ppc8540_most") + +;; The following units are used to make automata deterministic +(define_cpu_unit "present_ppc8540_decode_0" "ppc8540_most") +(define_cpu_unit "present_ppc8540_issue_0" "ppc8540_most") +(define_cpu_unit "present_ppc8540_retire_0" "ppc8540_retire") +(define_cpu_unit "present_ppc8540_su0_stage0" "ppc8540_most") + +;; The following sets to make automata deterministic when option ndfa is used. 
+(presence_set "present_ppc8540_decode_0" "ppc8540_decode_0") +(presence_set "present_ppc8540_issue_0" "ppc8540_issue_0") +(presence_set "present_ppc8540_retire_0" "ppc8540_retire_0") +(presence_set "present_ppc8540_su0_stage0" "ppc8540_su0_stage0") + +;; Some useful abbreviations. +(define_reservation "ppc8540_decode" + "ppc8540_decode_0|ppc8540_decode_1+present_ppc8540_decode_0") +(define_reservation "ppc8540_issue" + "ppc8540_issue_0|ppc8540_issue_1+present_ppc8540_issue_0") +(define_reservation "ppc8540_retire" + "ppc8540_retire_0|ppc8540_retire_1+present_ppc8540_retire_0") +(define_reservation "ppc8540_su_stage0" + "ppc8540_su0_stage0|ppc8540_su1_stage0+present_ppc8540_su0_stage0") + +;; Simple SU insns +(define_insn_reservation "ppc8540_su" 1 + (and (eq_attr "type" "integer,insert_word,insert_dword,cmp,compare,\ + delayed_compare,var_delayed_compare,fast_compare,\ + shift,trap,var_shift_rotate,cntlz,exts,isel") + (eq_attr "cpu" "ppc8540")) + "ppc8540_decode,ppc8540_issue+ppc8540_su_stage0+ppc8540_retire") + +(define_insn_reservation "ppc8540_two" 1 + (and (eq_attr "type" "two") + (eq_attr "cpu" "ppc8540")) + "ppc8540_decode,ppc8540_issue+ppc8540_su_stage0+ppc8540_retire,\ + ppc8540_issue+ppc8540_su_stage0+ppc8540_retire") + +(define_insn_reservation "ppc8540_three" 1 + (and (eq_attr "type" "three") + (eq_attr "cpu" "ppc8540")) + "ppc8540_decode,ppc8540_issue+ppc8540_su_stage0+ppc8540_retire,\ + ppc8540_issue+ppc8540_su_stage0+ppc8540_retire,\ + ppc8540_issue+ppc8540_su_stage0+ppc8540_retire") + +;; Branch. Actually this latency time is not used by the scheduler. 
+(define_insn_reservation "ppc8540_branch" 1 + (and (eq_attr "type" "jmpreg,branch,isync") + (eq_attr "cpu" "ppc8540")) + "ppc8540_decode,ppc8540_bu,ppc8540_retire") + +;; Multiply +(define_insn_reservation "ppc8540_multiply" 4 + (and (eq_attr "type" "imul,imul2,imul3,imul_compare") + (eq_attr "cpu" "ppc8540")) + "ppc8540_decode,ppc8540_issue+ppc8540_mu_stage0,ppc8540_mu_stage1,\ + ppc8540_mu_stage2,ppc8540_mu_stage3+ppc8540_retire") + +;; Divide. We use the average latency time here. We omit reserving a +;; retire unit because of the result automata will be huge. We ignore +;; reservation of miu_stage3 here because we use the average latency +;; time. +(define_insn_reservation "ppc8540_divide" 14 + (and (eq_attr "type" "idiv") + (eq_attr "cpu" "ppc8540")) + "ppc8540_decode,ppc8540_issue+ppc8540_mu_stage0+ppc8540_mu_div,\ + ppc8540_mu_div*13") + +;; CR logical +(define_insn_reservation "ppc8540_cr_logical" 1 + (and (eq_attr "type" "cr_logical,delayed_cr") + (eq_attr "cpu" "ppc8540")) + "ppc8540_decode,ppc8540_bu,ppc8540_retire") + +;; Mfcr +(define_insn_reservation "ppc8540_mfcr" 1 + (and (eq_attr "type" "mfcr") + (eq_attr "cpu" "ppc8540")) + "ppc8540_decode,ppc8540_issue+ppc8540_su1_stage0+ppc8540_retire") + +;; Mtcrf +(define_insn_reservation "ppc8540_mtcrf" 1 + (and (eq_attr "type" "mtcr") + (eq_attr "cpu" "ppc8540")) + "ppc8540_decode,ppc8540_issue+ppc8540_su1_stage0+ppc8540_retire") + +;; Mtjmpr +(define_insn_reservation "ppc8540_mtjmpr" 1 + (and (eq_attr "type" "mtjmpr,mfjmpr") + (eq_attr "cpu" "ppc8540")) + "ppc8540_decode,ppc8540_issue+ppc8540_su_stage0+ppc8540_retire") + +;; Loads +(define_insn_reservation "ppc8540_load" 3 + (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\ + load_l,sync") + (eq_attr "cpu" "ppc8540")) + "ppc8540_decode,ppc8540_issue+ppc8540_lsu,nothing,ppc8540_retire") + +;; Stores. 
+(define_insn_reservation "ppc8540_store" 3 + (and (eq_attr "type" "store,store_ux,store_u,store_c") + (eq_attr "cpu" "ppc8540")) + "ppc8540_decode,ppc8540_issue+ppc8540_lsu,nothing,ppc8540_retire") + +;; Simple FP +(define_insn_reservation "ppc8540_simple_float" 1 + (and (eq_attr "type" "fpsimple") + (eq_attr "cpu" "ppc8540")) + "ppc8540_decode,ppc8540_issue+ppc8540_su_stage0+ppc8540_retire") + +;; FP +(define_insn_reservation "ppc8540_float" 4 + (and (eq_attr "type" "fp") + (eq_attr "cpu" "ppc8540")) + "ppc8540_decode,ppc8540_issue+ppc8540_mu_stage0,ppc8540_mu_stage1,\ + ppc8540_mu_stage2,ppc8540_mu_stage3+ppc8540_retire") + +;; float divides. We omit reserving a retire unit and miu_stage3 +;; because of the result automata will be huge. +(define_insn_reservation "ppc8540_float_vector_divide" 29 + (and (eq_attr "type" "vecfdiv") + (eq_attr "cpu" "ppc8540")) + "ppc8540_decode,ppc8540_issue+ppc8540_mu_stage0+ppc8540_mu_div,\ + ppc8540_mu_div*28") + +;; Brinc +(define_insn_reservation "ppc8540_brinc" 1 + (and (eq_attr "type" "brinc") + (eq_attr "cpu" "ppc8540")) + "ppc8540_decode,ppc8540_issue+ppc8540_su_stage0+ppc8540_retire") + +;; Simple vector +(define_insn_reservation "ppc8540_simple_vector" 1 + (and (eq_attr "type" "vecsimple") + (eq_attr "cpu" "ppc8540")) + "ppc8540_decode,ppc8540_issue+ppc8540_su1_stage0+ppc8540_retire") + +;; Simple vector compare +(define_insn_reservation "ppc8540_simple_vector_compare" 1 + (and (eq_attr "type" "veccmpsimple") + (eq_attr "cpu" "ppc8540")) + "ppc8540_decode,ppc8540_issue+ppc8540_su_stage0+ppc8540_retire") + +;; Vector compare +(define_insn_reservation "ppc8540_vector_compare" 1 + (and (eq_attr "type" "veccmp") + (eq_attr "cpu" "ppc8540")) + "ppc8540_decode,ppc8540_issue+ppc8540_su1_stage0+ppc8540_retire") + +;; evsplatfi evsplati +(define_insn_reservation "ppc8540_vector_perm" 1 + (and (eq_attr "type" "vecperm") + (eq_attr "cpu" "ppc8540")) + "ppc8540_decode,ppc8540_issue+ppc8540_su1_stage0+ppc8540_retire") + +;; Vector 
float +(define_insn_reservation "ppc8540_float_vector" 4 + (and (eq_attr "type" "vecfloat") + (eq_attr "cpu" "ppc8540")) + "ppc8540_decode,ppc8540_issue+ppc8540_mu_stage0,ppc8540_mu_stage1,\ + ppc8540_mu_stage2,ppc8540_mu_stage3+ppc8540_retire") + +;; Vector divides: Use the average. We omit reserving a retire unit +;; because of the result automata will be huge. We ignore reservation +;; of miu_stage3 here because we use the average latency time. +(define_insn_reservation "ppc8540_vector_divide" 14 + (and (eq_attr "type" "vecdiv") + (eq_attr "cpu" "ppc8540")) + "ppc8540_decode,ppc8540_issue+ppc8540_mu_stage0+ppc8540_mu_div,\ + ppc8540_mu_div*13") + +;; Complex vector. +(define_insn_reservation "ppc8540_complex_vector" 4 + (and (eq_attr "type" "veccomplex") + (eq_attr "cpu" "ppc8540")) + "ppc8540_decode,ppc8540_issue+ppc8540_mu_stage0,ppc8540_mu_stage1,\ + ppc8540_mu_stage2,ppc8540_mu_stage3+ppc8540_retire") + +;; Vector load +(define_insn_reservation "ppc8540_vector_load" 3 + (and (eq_attr "type" "vecload") + (eq_attr "cpu" "ppc8540")) + "ppc8540_decode,ppc8540_issue+ppc8540_lsu,nothing,ppc8540_retire") + +;; Vector store +(define_insn_reservation "ppc8540_vector_store" 3 + (and (eq_attr "type" "vecstore") + (eq_attr "cpu" "ppc8540")) + "ppc8540_decode,ppc8540_issue+ppc8540_lsu,nothing,ppc8540_retire") diff --git a/gcc/config/rs6000/a2.md b/gcc/config/rs6000/a2.md new file mode 100644 index 000000000..851d8949f --- /dev/null +++ b/gcc/config/rs6000/a2.md @@ -0,0 +1,134 @@ +;; Scheduling description for PowerPC A2 processors. +;; Copyright (C) 2009 Free Software Foundation, Inc. +;; Contributed by Ben Elliston (bje@au.ibm.com) + +;; This file is part of GCC. + +;; GCC is free software; you can redistribute it and/or modify it +;; under the terms of the GNU General Public License as published +;; by the Free Software Foundation; either version 3, or (at your +;; option) any later version. 
+ +;; GCC is distributed in the hope that it will be useful, but WITHOUT +;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +;; License for more details. + +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . + +(define_automaton "ppca2") + +;; CPU units + +;; The multiplier pipeline. +(define_cpu_unit "mult" "ppca2") + +;; The auxillary processor unit (FP/vector unit). +(define_cpu_unit "axu" "ppca2") + +;; D.4.6 +;; Some peculiarities for certain SPRs + +(define_insn_reservation "ppca2-mfcr" 1 + (and (eq_attr "type" "mfcr") + (eq_attr "cpu" "ppca2")) + "nothing") + +(define_insn_reservation "ppca2-mfjmpr" 5 + (and (eq_attr "type" "mfjmpr") + (eq_attr "cpu" "ppca2")) + "nothing") + +(define_insn_reservation "ppca2-mtjmpr" 5 + (and (eq_attr "type" "mtjmpr") + (eq_attr "cpu" "ppca2")) + "nothing") + +;; D.4.8 +(define_insn_reservation "ppca2-imul" 1 + (and (eq_attr "type" "imul,imul2,imul3,imul_compare") + (eq_attr "cpu" "ppca2")) + "nothing") + +;; FIXME: latency and multiplier reservation for 64-bit multiply? +(define_insn_reservation "ppca2-lmul" 6 + (and (eq_attr "type" "lmul,lmul_compare") + (eq_attr "cpu" "ppca2")) + "mult*3") + +;; D.4.9 +(define_insn_reservation "ppca2-idiv" 32 + (and (eq_attr "type" "idiv") + (eq_attr "cpu" "ppca2")) + "mult*32") + +(define_insn_reservation "ppca2-ldiv" 65 + (and (eq_attr "type" "ldiv") + (eq_attr "cpu" "ppca2")) + "mult*65") + +;; D.4.13 +(define_insn_reservation "ppca2-load" 5 + (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u") + (eq_attr "cpu" "ppca2")) + "nothing") + +;; D.8.1 +(define_insn_reservation "ppca2-fp" 6 + (and (eq_attr "type" "fp") ;; Ignore fpsimple insn types (SPE only). 
+ (eq_attr "cpu" "ppca2")) + "axu") + +;; D.8.4 +(define_insn_reservation "ppca2-fp-load" 6 + (and (eq_attr "type" "fpload,fpload_u,fpload_ux") + (eq_attr "cpu" "ppca2")) + "axu") + +;; D.8.5 +(define_insn_reservation "ppca2-fp-store" 2 + (and (eq_attr "type" "fpstore,fpstore_u,fpstore_ux") + (eq_attr "cpu" "ppca2")) + "axu") + +;; D.8.6 +(define_insn_reservation "ppca2-fpcompare" 5 + (and (eq_attr "type" "fpcompare") + (eq_attr "cpu" "ppca2")) + "axu") + +;; D.8.7 +;; +;; Instructions from the same thread succeeding the floating-point +;; divide cannot be executed until the floating-point divide has +;; completed. Since there is nothing else we can do, this thread will +;; just have to stall. + +(define_insn_reservation "ppca2-ddiv" 72 + (and (eq_attr "type" "ddiv") + (eq_attr "cpu" "ppca2")) + "axu") + +(define_insn_reservation "ppca2-sdiv" 59 + (and (eq_attr "type" "sdiv") + (eq_attr "cpu" "ppca2")) + "axu") + +;; D.8.8 +;; +;; Instructions from the same thread succeeding the floating-point +;; divide cannot be executed until the floating-point divide has +;; completed. Since there is nothing else we can do, this thread will +;; just have to stall. + +(define_insn_reservation "ppca2-dsqrt" 69 + (and (eq_attr "type" "dsqrt") + (eq_attr "cpu" "ppca2")) + "axu") + +(define_insn_reservation "ppca2-ssqrt" 65 + (and (eq_attr "type" "ssqrt") + (eq_attr "cpu" "ppca2")) + "axu") diff --git a/gcc/config/rs6000/aix-stdint.h b/gcc/config/rs6000/aix-stdint.h new file mode 100644 index 000000000..8b20c152a --- /dev/null +++ b/gcc/config/rs6000/aix-stdint.h @@ -0,0 +1,51 @@ +/* Definitions for types on systems using AIX. + Copyright (C) 2009 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. 
+ +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#define SIG_ATOMIC_TYPE "int" + +#define INT8_TYPE "signed char" +#define INT16_TYPE "short int" +#define INT32_TYPE "int" +#define INT64_TYPE (LONG_TYPE_SIZE == 64 ? "long int" : "long long int") +#define UINT8_TYPE "unsigned char" +#define UINT16_TYPE "short unsigned int" +#define UINT32_TYPE "unsigned int" +#define UINT64_TYPE (LONG_TYPE_SIZE == 64 ? "long unsigned int" : "long long unsigned int") + +#define INT_LEAST8_TYPE "signed char" +#define INT_LEAST16_TYPE "short int" +#define INT_LEAST32_TYPE "int" +#define INT_LEAST64_TYPE (LONG_TYPE_SIZE == 64 ? "long int" : "long long int") +#define UINT_LEAST8_TYPE "unsigned char" +#define UINT_LEAST16_TYPE "short unsigned int" +#define UINT_LEAST32_TYPE "unsigned int" +#define UINT_LEAST64_TYPE (LONG_TYPE_SIZE == 64 ? "long unsigned int" : "long long unsigned int") + +#define INT_FAST8_TYPE "signed char" +#define INT_FAST16_TYPE "short int" +#define INT_FAST32_TYPE "int" +#define INT_FAST64_TYPE (LONG_TYPE_SIZE == 64 ? "long int" : "long long int") +#define UINT_FAST8_TYPE "unsigned char" +#define UINT_FAST16_TYPE "short unsigned int" +#define UINT_FAST32_TYPE "unsigned int" +#define UINT_FAST64_TYPE (LONG_TYPE_SIZE == 64 ? "long unsigned int" : "long long unsigned int") + +#define INTPTR_TYPE "long int" +#define UINTPTR_TYPE "long unsigned int" + diff --git a/gcc/config/rs6000/aix.h b/gcc/config/rs6000/aix.h new file mode 100644 index 000000000..7f60d329b --- /dev/null +++ b/gcc/config/rs6000/aix.h @@ -0,0 +1,260 @@ +/* Definitions of target machine for GNU compiler, + for IBM RS/6000 POWER running AIX. 
+ Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010 + Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +/* Yes! We are AIX! */ +#define DEFAULT_ABI ABI_AIX +#undef TARGET_AIX +#define TARGET_AIX 1 + +/* Linux64.h wants to redefine TARGET_AIX based on -m64, but it can't be used + in the #if conditional in options-default.h, so provide another macro. */ +#undef TARGET_AIX_OS +#define TARGET_AIX_OS 1 + +/* AIX always has a TOC. */ +#define TARGET_NO_TOC 0 +#define TARGET_TOC 1 +#define FIXED_R2 1 + +/* AIX allows r13 to be used in 32-bit mode. */ +#define FIXED_R13 0 + +/* 32-bit and 64-bit AIX stack boundary is 128. */ +#undef STACK_BOUNDARY +#define STACK_BOUNDARY 128 + +#undef TARGET_IEEEQUAD +#define TARGET_IEEEQUAD 0 + +/* The AIX linker will discard static constructors in object files before + collect has a chance to see them, so scan the object files directly. */ +#define COLLECT_EXPORT_LIST + +#if HAVE_AS_REF +/* Issue assembly directives that create a reference to the given DWARF table + identifier label from the current function section. This is defined to + ensure we drag frame frame tables associated with needed function bodies in + a link with garbage collection activated. */ +#define ASM_OUTPUT_DWARF_TABLE_REF rs6000_aix_asm_output_dwarf_table_ref +#endif + +/* This is the only version of nm that collect2 can work with. 
*/ +#define REAL_NM_FILE_NAME "/usr/ucb/nm" + +#define USER_LABEL_PREFIX "" + +/* Don't turn -B into -L if the argument specifies a relative file name. */ +#define RELATIVE_PREFIX_NOT_LINKDIR + +/* Because of the above, we must have gcc search itself to find libgcc.a. */ +#define LINK_LIBGCC_SPECIAL_1 + +#define MFWRAP_SPEC " %{static: %{fmudflap|fmudflapth: \ + -brename:malloc,__wrap_malloc -brename:__real_malloc,malloc \ + -brename:free,__wrap_free -brename:__real_free,free \ + -brename:calloc,__wrap_calloc -brename:__real_calloc,calloc \ + -brename:realloc,__wrap_realloc -brename:__real_realloc,realloc \ + -brename:mmap,__wrap_mmap -brename:__real_mmap,mmap \ + -brename:munmap,__wrap_munmap -brename:__real_munmap,munmap \ + -brename:alloca,__wrap_alloca -brename:__real_alloca,alloca \ +} %{fmudflapth: \ + -brename:pthread_create,__wrap_pthread_create \ + -brename:__real_pthread_create,pthread_create \ + -brename:pthread_join,__wrap_pthread_join \ + -brename:__real_pthread_join,pthread_join \ + -brename:pthread_exit,__wrap_pthread_exit \ + -brename:__real_pthread_exit,pthread_exit \ +}} %{fmudflap|fmudflapth: \ + -brename:main,__wrap_main -brename:__real_main,main \ +}" + +#define MFLIB_SPEC " %{fmudflap: -lmudflap \ + %{static:%(link_gcc_c_sequence) -lmudflap}} \ + %{fmudflapth: -lmudflapth -lpthread \ + %{static:%(link_gcc_c_sequence) -lmudflapth}} " + +/* Names to predefine in the preprocessor for this target machine. */ +#define TARGET_OS_AIX_CPP_BUILTINS() \ + do \ + { \ + builtin_define ("_IBMR2"); \ + builtin_define ("_POWER"); \ + builtin_define ("_AIX"); \ + builtin_define ("_AIX32"); \ + builtin_define ("_AIX41"); \ + builtin_define ("_LONG_LONG"); \ + if (TARGET_LONG_DOUBLE_128) \ + builtin_define ("__LONGDOUBLE128"); \ + builtin_assert ("system=unix"); \ + builtin_assert ("system=aix"); \ + } \ + while (0) + +/* Define appropriate architecture macros for preprocessor depending on + target switches. 
*/ + +#define CPP_SPEC "%{posix: -D_POSIX_SOURCE}\ + %{ansi: -D_ANSI_C_SOURCE}" + +#define CC1_SPEC "%(cc1_cpu)" + +#undef ASM_DEFAULT_SPEC +#define ASM_DEFAULT_SPEC "" + +/* Tell the assembler to assume that all undefined names are external. + + Don't do this until the fixed IBM assembler is more generally available. + When this becomes permanently defined, the ASM_OUTPUT_EXTERNAL, + ASM_OUTPUT_EXTERNAL_LIBCALL, and RS6000_OUTPUT_BASENAME macros will no + longer be needed. Also, the extern declaration of mcount in + rs6000_xcoff_file_start will no longer be needed. */ + +/* #define ASM_SPEC "-u %(asm_cpu)" */ + +/* Default location of syscalls.exp under AIX */ +#define LINK_SYSCALLS_SPEC "-bI:%R/lib/syscalls.exp" + +/* Default location of libg.exp under AIX */ +#define LINK_LIBG_SPEC "-bexport:%R/usr/lib/libg.exp" + +/* Define the options for the binder: Start text at 512, align all segments + to 512 bytes, and warn if there is text relocation. + + The -bhalt:4 option supposedly changes the level at which ld will abort, + but it also suppresses warnings about multiply defined symbols and is + used by the AIX cc command. So we use it here. + + -bnodelcsect undoes a poor choice of default relating to multiply-defined + csects. See AIX documentation for more information about this. + + -bM:SRE tells the linker that the output file is Shared REusable. Note + that to actually build a shared library you will also need to specify an + export list with the -Wl,-bE option. */ + +#define LINK_SPEC "-T512 -H512 %{!r:-btextro} -bhalt:4 -bnodelcsect\ +%{static:-bnso %(link_syscalls) } \ +%{!shared:%{g*: %(link_libg) }} %{shared:-bM:SRE}" + +/* Profiled library versions are used by linking with special directories. */ +#define LIB_SPEC "%{pg:-L%R/lib/profiled -L%R/usr/lib/profiled}\ +%{p:-L%R/lib/profiled -L%R/usr/lib/profiled} %{!shared:%{g*:-lg}} -lc" + +/* Static linking with shared libstdc++ requires libsupc++ as well. 
*/ +#define LIBSTDCXX_STATIC "supc++" + +/* This now supports a natural alignment mode. */ +/* AIX word-aligns FP doubles but doubleword-aligns 64-bit ints. */ +#define ADJUST_FIELD_ALIGN(FIELD, COMPUTED) \ + ((TARGET_ALIGN_NATURAL == 0 \ + && TYPE_MODE (strip_array_types (TREE_TYPE (FIELD))) == DFmode) \ + ? MIN ((COMPUTED), 32) \ + : (COMPUTED)) + +/* AIX increases natural record alignment to doubleword if the first + field is an FP double while the FP fields remain word aligned. */ +#define ROUND_TYPE_ALIGN(STRUCT, COMPUTED, SPECIFIED) \ + ((TREE_CODE (STRUCT) == RECORD_TYPE \ + || TREE_CODE (STRUCT) == UNION_TYPE \ + || TREE_CODE (STRUCT) == QUAL_UNION_TYPE) \ + && TARGET_ALIGN_NATURAL == 0 \ + ? rs6000_special_round_type_align (STRUCT, COMPUTED, SPECIFIED) \ + : MAX ((COMPUTED), (SPECIFIED))) + +/* The AIX ABI isn't explicit on whether aggregates smaller than a + word/doubleword should be padded upward or downward. One could + reasonably assume that they follow the normal rules for structure + layout treating the parameter area as any other block of memory, + then map the reg param area to registers, i.e., pad upward, which + is the way IBM Compilers for AIX behave. + Setting both of the following defines results in this behavior. */ +#define AGGREGATE_PADDING_FIXED 1 +#define AGGREGATES_PAD_UPWARD_ALWAYS 1 + +/* Specify padding for the last element of a block move between + registers and memory. FIRST is nonzero if this is the only + element. */ +#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \ + (!(FIRST) ? upward : FUNCTION_ARG_PADDING (MODE, TYPE)) + +/* Indicate that jump tables go in the text section. */ + +#define JUMP_TABLES_IN_TEXT_SECTION 1 + +/* Define any extra SPECS that the compiler needs to generate. */ +#undef SUBTARGET_EXTRA_SPECS +#define SUBTARGET_EXTRA_SPECS \ + { "link_syscalls", LINK_SYSCALLS_SPEC }, \ + { "link_libg", LINK_LIBG_SPEC } + +/* Define cutoff for using external functions to save floating point. 
*/ +#define FP_SAVE_INLINE(FIRST_REG) ((FIRST_REG) == 62 || (FIRST_REG) == 63) +/* And similarly for general purpose registers. */ +#define GP_SAVE_INLINE(FIRST_REG) ((FIRST_REG) < 32) + +/* If the current unwind info (FS) does not contain explicit info + saving R2, then we have to do a minor amount of code reading to + figure out if it was saved. The big problem here is that the + code that does the save/restore is generated by the linker, so + we have no good way to determine at compile time what to do. */ + +#define R_LR 65 + +#ifdef __64BIT__ +#define MD_FROB_UPDATE_CONTEXT(CTX, FS) \ + do { \ + if ((FS)->regs.reg[2].how == REG_UNSAVED) \ + { \ + unsigned int *insn \ + = (unsigned int *) \ + _Unwind_GetGR ((CTX), R_LR); \ + if (*insn == 0xE8410028) \ + _Unwind_SetGRPtr ((CTX), 2, (CTX)->cfa + 40); \ + } \ + } while (0) +#else +#define MD_FROB_UPDATE_CONTEXT(CTX, FS) \ + do { \ + if ((FS)->regs.reg[2].how == REG_UNSAVED) \ + { \ + unsigned int *insn \ + = (unsigned int *) \ + _Unwind_GetGR ((CTX), R_LR); \ + if (*insn == 0x80410014) \ + _Unwind_SetGRPtr ((CTX), 2, (CTX)->cfa + 20); \ + } \ + } while (0) +#endif + +#define PROFILE_HOOK(LABEL) output_profile_hook (LABEL) + +/* Print subsidiary information on the compiler version in use. */ +#define TARGET_VERSION ; + +/* No version of AIX fully supports AltiVec or 64-bit instructions in + 32-bit mode. */ +#define OS_MISSING_POWERPC64 1 +#define OS_MISSING_ALTIVEC 1 + +/* WINT_TYPE */ +#define WINT_TYPE "int" + +/* Static stack checking is supported by means of probes. */ +#define STACK_CHECK_STATIC_BUILTIN 1 diff --git a/gcc/config/rs6000/aix43.h b/gcc/config/rs6000/aix43.h new file mode 100644 index 000000000..8e285decb --- /dev/null +++ b/gcc/config/rs6000/aix43.h @@ -0,0 +1,185 @@ +/* Definitions of target machine for GNU compiler, + for IBM RS/6000 POWER running AIX version 4.3. + Copyright (C) 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006, + 2007, 2009, 2010 Free Software Foundation, Inc. 
+ Contributed by David Edelsohn (edelsohn@gnu.org). + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +/* The macro SUBTARGET_OVERRIDE_OPTIONS is provided for subtargets, to + get control in TARGET_OPTION_OVERRIDE. */ + +#define NON_POWERPC_MASKS (MASK_POWER | MASK_POWER2) +#define SUBTARGET_OVERRIDE_OPTIONS \ +do { \ + if (TARGET_64BIT && (target_flags & NON_POWERPC_MASKS)) \ + { \ + target_flags &= ~NON_POWERPC_MASKS; \ + warning (0, "-maix64 and POWER architecture are incompatible"); \ + } \ + if (TARGET_64BIT && ! TARGET_POWERPC64) \ + { \ + target_flags |= MASK_POWERPC64; \ + warning (0, "-maix64 requires PowerPC64 architecture remain enabled"); \ + } \ + if (TARGET_SOFT_FLOAT && TARGET_LONG_DOUBLE_128) \ + { \ + rs6000_long_double_type_size = 64; \ + if (rs6000_explicit_options.long_double) \ + warning (0, "soft-float and long-double-128 are incompatible"); \ + } \ + if (TARGET_POWERPC64 && ! TARGET_64BIT) \ + { \ + error ("-maix64 required: 64-bit computation with 32-bit addressing not yet supported"); \ + } \ +} while (0); + +#undef ASM_SPEC +#define ASM_SPEC "-u %{maix64:-a64 %{!mcpu*:-mppc64}} %(asm_cpu)" + +/* Common ASM definitions used by ASM_SPEC amongst the various targets + for handling -mcpu=xxx switches. 
*/ +#undef ASM_CPU_SPEC +#define ASM_CPU_SPEC \ +"%{!mcpu*: %{!maix64: \ + %{mpower: %{!mpower2: -mpwr}} \ + %{mpower2: -mpwr2} \ + %{mpowerpc*: %{!mpowerpc64: -mppc}} \ + %{mpowerpc64: -mppc64} \ + %{!mpower*: %{!mpowerpc*: %(asm_default)}}}} \ +%{mcpu=common: -mcom} \ +%{mcpu=power: -mpwr} \ +%{mcpu=power2: -mpwr2} \ +%{mcpu=power3: -m620} \ +%{mcpu=power4: -m620} \ +%{mcpu=powerpc: -mppc} \ +%{mcpu=rios: -mpwr} \ +%{mcpu=rios1: -mpwr} \ +%{mcpu=rios2: -mpwr2} \ +%{mcpu=rsc: -mpwr} \ +%{mcpu=rsc1: -mpwr} \ +%{mcpu=rs64a: -mppc} \ +%{mcpu=601: -m601} \ +%{mcpu=602: -mppc} \ +%{mcpu=603: -m603} \ +%{mcpu=603e: -m603} \ +%{mcpu=604: -m604} \ +%{mcpu=604e: -m604} \ +%{mcpu=620: -m620} \ +%{mcpu=630: -m620}" + +#undef ASM_DEFAULT_SPEC +#define ASM_DEFAULT_SPEC "-mcom" + +#undef TARGET_OS_CPP_BUILTINS +#define TARGET_OS_CPP_BUILTINS() \ + do \ + { \ + builtin_define ("_AIX43"); \ + TARGET_OS_AIX_CPP_BUILTINS (); \ + } \ + while (0) + +#undef CPP_SPEC +#define CPP_SPEC "%{posix: -D_POSIX_SOURCE}\ + %{ansi: -D_ANSI_C_SOURCE}\ + %{maix64: -D__64BIT__}\ + %{mpe: -I%R/usr/lpp/ppe.poe/include}\ + %{pthread: -D_THREAD_SAFE}" + +/* The GNU C++ standard library requires that these macros be + defined. */ +#undef CPLUSPLUS_CPP_SPEC +#define CPLUSPLUS_CPP_SPEC \ + "-D_ALL_SOURCE \ + %{maix64: -D__64BIT__} \ + %{mpe: -I%R/usr/lpp/ppe.poe/include} \ + %{pthread: -D_THREAD_SAFE}" + +#undef TARGET_DEFAULT +#define TARGET_DEFAULT MASK_NEW_MNEMONICS + +#undef PROCESSOR_DEFAULT +#define PROCESSOR_DEFAULT PROCESSOR_PPC604e + +/* AIX does not support Altivec. */ +#undef TARGET_ALTIVEC +#define TARGET_ALTIVEC 0 +#undef TARGET_ALTIVEC_ABI +#define TARGET_ALTIVEC_ABI 0 + +/* Define this macro as a C expression for the initializer of an + array of string to tell the driver program which options are + defaults for this target and thus do not need to be handled + specially when using `MULTILIB_OPTIONS'. 
+ + Do not define this macro if `MULTILIB_OPTIONS' is not defined in + the target makefile fragment or if none of the options listed in + `MULTILIB_OPTIONS' are set by default. *Note Target Fragment::. */ + +#undef MULTILIB_DEFAULTS +#define MULTILIB_DEFAULTS { "mcpu=common" } + +#undef LIB_SPEC +#define LIB_SPEC "%{pg:-L%R/lib/profiled -L%R/usr/lib/profiled}\ + %{p:-L%R/lib/profiled -L%R/usr/lib/profiled}\ + %{!maix64:%{!shared:%{g*:-lg}}}\ + %{mpe:-L%R/usr/lpp/ppe.poe/lib -lmpi -lvtd}\ + %{pthread:-L%R/usr/lib/threads -lpthreads -lc_r %R/usr/lib/libc.a}\ + %{!pthread:-lc}" + +#undef LINK_SPEC +#define LINK_SPEC "-bpT:0x10000000 -bpD:0x20000000 %{!r:-btextro} -bnodelcsect\ + %{static:-bnso %(link_syscalls) } %{shared:-bM:SRE %{!e:-bnoentry}}\ + %{!maix64:%{!shared:%{g*: %(link_libg) }}} %{maix64:-b64}\ + %{mpe:-binitfini:poe_remote_main}" + +#undef STARTFILE_SPEC +#define STARTFILE_SPEC "%{!shared:\ + %{maix64:%{pg:gcrt0_64%O%s}%{!pg:%{p:mcrt0_64%O%s}%{!p:crt0_64%O%s}}}\ + %{!maix64:\ + %{pthread:%{pg:gcrt0_r%O%s}%{!pg:%{p:mcrt0_r%O%s}%{!p:crt0_r%O%s}}}\ + %{!pthread:%{pg:gcrt0%O%s}%{!pg:%{p:mcrt0%O%s}%{!p:crt0%O%s}}}}}" + +/* AIX 4.3 typedefs ptrdiff_t as "long" while earlier releases used "int". */ + +#undef PTRDIFF_TYPE +#define PTRDIFF_TYPE "long int" + +/* AIX 4 uses PowerPC nop (ori 0,0,0) instruction as call glue for PowerPC + and "cror 31,31,31" for POWER architecture. */ + +#undef RS6000_CALL_GLUE +#define RS6000_CALL_GLUE "{cror 31,31,31|nop}" + +/* AIX 4.2 and above provides initialization and finalization function + support from linker command line. */ +#undef HAS_INIT_SECTION +#define HAS_INIT_SECTION + +#undef LD_INIT_SWITCH +#define LD_INIT_SWITCH "-binitfini" + +/* The IBM AIX 4.x assembler doesn't support forward references in + .set directives. We handle this by deferring the output of .set + directives to the end of the compilation unit. */ +#define TARGET_DEFERRED_OUTPUT_DEFS(DECL,TARGET) true + +/* This target uses the aix64.opt file. 
*/ +#define TARGET_USES_AIX64_OPT 1 + +#define TARGET_AIX_VERSION 43 diff --git a/gcc/config/rs6000/aix51.h b/gcc/config/rs6000/aix51.h new file mode 100644 index 000000000..90d504f61 --- /dev/null +++ b/gcc/config/rs6000/aix51.h @@ -0,0 +1,189 @@ +/* Definitions of target machine for GNU compiler, + for IBM RS/6000 POWER running AIX V5. + Copyright (C) 2001, 2003, 2004, 2005, 2007, 2008, 2009, 2010 + Free Software Foundation, Inc. + Contributed by David Edelsohn (edelsohn@gnu.org). + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +/* The macro SUBTARGET_OVERRIDE_OPTIONS is provided for subtargets, to + get control in TARGET_OPTION_OVERRIDE. */ + +#define NON_POWERPC_MASKS (MASK_POWER | MASK_POWER2) +#define SUBTARGET_OVERRIDE_OPTIONS \ +do { \ + if (TARGET_64BIT && (target_flags & NON_POWERPC_MASKS)) \ + { \ + target_flags &= ~NON_POWERPC_MASKS; \ + warning (0, "-maix64 and POWER architecture are incompatible"); \ + } \ + if (TARGET_64BIT && ! TARGET_POWERPC64) \ + { \ + target_flags |= MASK_POWERPC64; \ + warning (0, "-maix64 requires PowerPC64 architecture remain enabled"); \ + } \ + if (TARGET_POWERPC64 && ! 
TARGET_64BIT) \ + { \ + error ("-maix64 required: 64-bit computation with 32-bit addressing not yet supported"); \ + } \ +} while (0); + +#undef ASM_SPEC +#define ASM_SPEC "-u %{maix64:-a64 %{!mcpu*:-mppc64}} %(asm_cpu)" + +/* Common ASM definitions used by ASM_SPEC amongst the various targets + for handling -mcpu=xxx switches. */ +#undef ASM_CPU_SPEC +#define ASM_CPU_SPEC \ +"%{!mcpu*: %{!maix64: \ + %{mpower: %{!mpower2: -mpwr}} \ + %{mpower2: -mpwr2} \ + %{mpowerpc*: %{!mpowerpc64: -mppc}} \ + %{mpowerpc64: -mppc64} \ + %{!mpower*: %{!mpowerpc*: %(asm_default)}}}} \ +%{mcpu=common: -mcom} \ +%{mcpu=power: -mpwr} \ +%{mcpu=power2: -mpwr2} \ +%{mcpu=power3: -m620} \ +%{mcpu=power4: -m620} \ +%{mcpu=powerpc: -mppc} \ +%{mcpu=rios: -mpwr} \ +%{mcpu=rios1: -mpwr} \ +%{mcpu=rios2: -mpwr2} \ +%{mcpu=rsc: -mpwr} \ +%{mcpu=rsc1: -mpwr} \ +%{mcpu=rs64a: -mppc} \ +%{mcpu=601: -m601} \ +%{mcpu=602: -mppc} \ +%{mcpu=603: -m603} \ +%{mcpu=603e: -m603} \ +%{mcpu=604: -m604} \ +%{mcpu=604e: -m604} \ +%{mcpu=620: -m620} \ +%{mcpu=630: -m620} \ +%{mcpu=970: -m620} \ +%{mcpu=G5: -m620}" + +#undef ASM_DEFAULT_SPEC +#define ASM_DEFAULT_SPEC "-mcom" + +#undef TARGET_OS_CPP_BUILTINS +#define TARGET_OS_CPP_BUILTINS() \ + do \ + { \ + builtin_define ("_AIX43"); \ + builtin_define ("_AIX51"); \ + TARGET_OS_AIX_CPP_BUILTINS (); \ + } \ + while (0) + +#undef CPP_SPEC +#define CPP_SPEC "%{posix: -D_POSIX_SOURCE} \ + %{ansi: -D_ANSI_C_SOURCE} \ + %{maix64: -D__64BIT__} \ + %{mpe: -I%R/usr/lpp/ppe.poe/include} \ + %{pthread: -D_THREAD_SAFE}" + +/* The GNU C++ standard library requires that these macros be + defined. */ +#undef CPLUSPLUS_CPP_SPEC +#define CPLUSPLUS_CPP_SPEC \ + "-D_ALL_SOURCE \ + %{maix64: -D__64BIT__} \ + %{mpe: -I%R/usr/lpp/ppe.poe/include} \ + %{pthread: -D_THREAD_SAFE}" + +#undef TARGET_DEFAULT +#define TARGET_DEFAULT MASK_NEW_MNEMONICS + +#undef PROCESSOR_DEFAULT +#define PROCESSOR_DEFAULT PROCESSOR_PPC604e + +/* AIX does not support Altivec. 
*/ +#undef TARGET_ALTIVEC +#define TARGET_ALTIVEC 0 +#undef TARGET_ALTIVEC_ABI +#define TARGET_ALTIVEC_ABI 0 + +/* Define this macro as a C expression for the initializer of an + array of string to tell the driver program which options are + defaults for this target and thus do not need to be handled + specially when using `MULTILIB_OPTIONS'. + + Do not define this macro if `MULTILIB_OPTIONS' is not defined in + the target makefile fragment or if none of the options listed in + `MULTILIB_OPTIONS' are set by default. *Note Target Fragment::. */ + +#undef MULTILIB_DEFAULTS +#define MULTILIB_DEFAULTS { "mcpu=common" } + +#undef LIB_SPEC +#define LIB_SPEC "%{pg:-L%R/lib/profiled -L%R/usr/lib/profiled}\ + %{p:-L%R/lib/profiled -L%R/usr/lib/profiled}\ + %{!maix64:%{!shared:%{g*:-lg}}}\ + %{mpe:-L%R/usr/lpp/ppe.poe/lib -lmpi -lvtd}\ + %{pthread:-lpthreads} -lc" + +#undef LINK_SPEC +#define LINK_SPEC "-bpT:0x10000000 -bpD:0x20000000 %{!r:-btextro} -bnodelcsect\ + %{static:-bnso %(link_syscalls) } %{shared:-bM:SRE %{!e:-bnoentry}}\ + %{!maix64:%{!shared:%{g*: %(link_libg) }}} %{maix64:-b64}\ + %{mpe:-binitfini:poe_remote_main}" + +#undef STARTFILE_SPEC +#define STARTFILE_SPEC "%{!shared:\ + %{maix64:%{pg:gcrt0_64%O%s}%{!pg:%{p:mcrt0_64%O%s}%{!p:crt0_64%O%s}}}\ + %{!maix64:\ + %{pthread:%{pg:gcrt0_r%O%s}%{!pg:%{p:mcrt0_r%O%s}%{!p:crt0_r%O%s}}}\ + %{!pthread:%{pg:gcrt0%O%s}%{!pg:%{p:mcrt0%O%s}%{!p:crt0%O%s}}}}}" + +/* AIX V5 typedefs ptrdiff_t as "long" while earlier releases used "int". */ + +#undef PTRDIFF_TYPE +#define PTRDIFF_TYPE "long int" + +/* Type used for wchar_t, as a string used in a declaration. */ +#undef WCHAR_TYPE +#define WCHAR_TYPE (!TARGET_64BIT ? "short unsigned int" : "unsigned int") + +/* Width of wchar_t in bits. */ +#undef WCHAR_TYPE_SIZE +#define WCHAR_TYPE_SIZE (!TARGET_64BIT ? 16 : 32) + +/* AIX V5 uses PowerPC nop (ori 0,0,0) instruction as call glue for PowerPC + and "cror 31,31,31" for POWER architecture. 
*/ + +#undef RS6000_CALL_GLUE +#define RS6000_CALL_GLUE "{cror 31,31,31|nop}" + +/* AIX 4.2 and above provides initialization and finalization function + support from linker command line. */ +#undef HAS_INIT_SECTION +#define HAS_INIT_SECTION + +#undef LD_INIT_SWITCH +#define LD_INIT_SWITCH "-binitfini" + +/* This target uses the aix64.opt file. */ +#define TARGET_USES_AIX64_OPT 1 + +/* This target defines SUPPORTS_WEAK and TARGET_ASM_NAMED_SECTION, + but does not have crtbegin/end. */ + +#define TARGET_USE_JCR_SECTION 0 + +#define TARGET_AIX_VERSION 51 diff --git a/gcc/config/rs6000/aix52.h b/gcc/config/rs6000/aix52.h new file mode 100644 index 000000000..a0fa21886 --- /dev/null +++ b/gcc/config/rs6000/aix52.h @@ -0,0 +1,199 @@ +/* Definitions of target machine for GNU compiler, + for IBM RS/6000 POWER running AIX V5.2. + Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 + Free Software Foundation, Inc. + Contributed by David Edelsohn (edelsohn@gnu.org). + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +/* The macro SUBTARGET_OVERRIDE_OPTIONS is provided for subtargets, to + get control in TARGET_OPTION_OVERRIDE. 
*/ + +#define NON_POWERPC_MASKS (MASK_POWER | MASK_POWER2) +#define SUBTARGET_OVERRIDE_OPTIONS \ +do { \ + if (TARGET_64BIT && (target_flags & NON_POWERPC_MASKS)) \ + { \ + target_flags &= ~NON_POWERPC_MASKS; \ + warning (0, "-maix64 and POWER architecture are incompatible"); \ + } \ + if (TARGET_64BIT && ! TARGET_POWERPC64) \ + { \ + target_flags |= MASK_POWERPC64; \ + warning (0, "-maix64 requires PowerPC64 architecture remain enabled"); \ + } \ + if (TARGET_SOFT_FLOAT && TARGET_LONG_DOUBLE_128) \ + { \ + rs6000_long_double_type_size = 64; \ + if (rs6000_explicit_options.long_double) \ + warning (0, "soft-float and long-double-128 are incompatible"); \ + } \ + if (TARGET_POWERPC64 && ! TARGET_64BIT) \ + { \ + error ("-maix64 required: 64-bit computation with 32-bit addressing not yet supported"); \ + } \ +} while (0); + +#undef ASM_SPEC +#define ASM_SPEC "-u %{maix64:-a64 %{!mcpu*:-mppc64}} %(asm_cpu)" + +/* Common ASM definitions used by ASM_SPEC amongst the various targets + for handling -mcpu=xxx switches. 
*/ +#undef ASM_CPU_SPEC +#define ASM_CPU_SPEC \ +"%{!mcpu*: %{!maix64: \ + %{mpowerpc64: -mppc64} \ + %{!mpower64: %(asm_default)}}} \ +%{mcpu=power3: -m620} \ +%{mcpu=power4: -m620} \ +%{mcpu=power5: -m620} \ +%{mcpu=power5+: -m620} \ +%{mcpu=power6: -m620} \ +%{mcpu=power6x: -m620} \ +%{mcpu=powerpc: -mppc} \ +%{mcpu=rs64a: -mppc} \ +%{mcpu=603: -m603} \ +%{mcpu=603e: -m603} \ +%{mcpu=604: -m604} \ +%{mcpu=604e: -m604} \ +%{mcpu=620: -m620} \ +%{mcpu=630: -m620} \ +%{mcpu=970: -m620} \ +%{mcpu=G5: -m620}" + +#undef ASM_DEFAULT_SPEC +#define ASM_DEFAULT_SPEC "-mppc" + +#undef TARGET_OS_CPP_BUILTINS +#define TARGET_OS_CPP_BUILTINS() \ + do \ + { \ + builtin_define ("_AIX43"); \ + builtin_define ("_AIX51"); \ + builtin_define ("_AIX52"); \ + TARGET_OS_AIX_CPP_BUILTINS (); \ + } \ + while (0) + +#undef CPP_SPEC +#define CPP_SPEC "%{posix: -D_POSIX_SOURCE} \ + %{ansi: -D_ANSI_C_SOURCE} \ + %{maix64: -D__64BIT__} \ + %{mpe: -I%R/usr/lpp/ppe.poe/include} \ + %{pthread: -D_THREAD_SAFE}" + +/* The GNU C++ standard library requires that these macros be + defined. Synchronize with libstdc++ os_defines.h. */ +#undef CPLUSPLUS_CPP_SPEC +#define CPLUSPLUS_CPP_SPEC \ + "-D_ALL_SOURCE \ + %{maix64: -D__64BIT__} \ + %{mpe: -I%R/usr/lpp/ppe.poe/include} \ + %{pthread: -D_THREAD_SAFE}" + +#undef TARGET_DEFAULT +#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS) + +#undef PROCESSOR_DEFAULT +#define PROCESSOR_DEFAULT PROCESSOR_POWER4 +#undef PROCESSOR_DEFAULT64 +#define PROCESSOR_DEFAULT64 PROCESSOR_POWER4 + +#undef TARGET_POWER +#define TARGET_POWER 0 + +/* AIX does not support Altivec. */ +#undef TARGET_ALTIVEC +#define TARGET_ALTIVEC 0 +#undef TARGET_ALTIVEC_ABI +#define TARGET_ALTIVEC_ABI 0 + +/* Define this macro as a C expression for the initializer of an + array of string to tell the driver program which options are + defaults for this target and thus do not need to be handled + specially when using `MULTILIB_OPTIONS'. 
+ + Do not define this macro if `MULTILIB_OPTIONS' is not defined in + the target makefile fragment or if none of the options listed in + `MULTILIB_OPTIONS' are set by default. *Note Target Fragment::. */ + +#undef MULTILIB_DEFAULTS + +#undef LIB_SPEC +#define LIB_SPEC "%{pg:-L%R/lib/profiled -L%R/usr/lib/profiled}\ + %{p:-L%R/lib/profiled -L%R/usr/lib/profiled}\ + %{!maix64:%{!shared:%{g*:-lg}}}\ + %{mpe:-L%R/usr/lpp/ppe.poe/lib -lmpi -lvtd}\ + %{pthread:-lpthreads} -lc" + +#undef LINK_SPEC +#define LINK_SPEC "-bpT:0x10000000 -bpD:0x20000000 %{!r:-btextro} -bnodelcsect\ + %{static:-bnso %(link_syscalls) } %{shared:-bM:SRE %{!e:-bnoentry}}\ + %{!maix64:%{!shared:%{g*: %(link_libg) }}} %{maix64:-b64}\ + %{mpe:-binitfini:poe_remote_main}" + +#undef STARTFILE_SPEC +#define STARTFILE_SPEC "%{!shared:\ + %{maix64:%{pg:gcrt0_64%O%s}%{!pg:%{p:mcrt0_64%O%s}%{!p:crt0_64%O%s}}}\ + %{!maix64:\ + %{pthread:%{pg:gcrt0_r%O%s}%{!pg:%{p:mcrt0_r%O%s}%{!p:crt0_r%O%s}}}\ + %{!pthread:%{pg:gcrt0%O%s}%{!pg:%{p:mcrt0%O%s}%{!p:crt0%O%s}}}}}" + +/* AIX V5 typedefs ptrdiff_t as "long" while earlier releases used "int". */ + +#undef PTRDIFF_TYPE +#define PTRDIFF_TYPE "long int" + +/* Type used for wchar_t, as a string used in a declaration. */ +#undef WCHAR_TYPE +#define WCHAR_TYPE (!TARGET_64BIT ? "short unsigned int" : "unsigned int") + +/* Width of wchar_t in bits. */ +#undef WCHAR_TYPE_SIZE +#define WCHAR_TYPE_SIZE (!TARGET_64BIT ? 16 : 32) + +/* AIX V5 uses PowerPC nop (ori 0,0,0) instruction as call glue for PowerPC + and "cror 31,31,31" for POWER architecture. */ + +#undef RS6000_CALL_GLUE +#define RS6000_CALL_GLUE "{cror 31,31,31|nop}" + +/* AIX 4.2 and above provides initialization and finalization function + support from linker command line. */ +#undef HAS_INIT_SECTION +#define HAS_INIT_SECTION + +#undef LD_INIT_SWITCH +#define LD_INIT_SWITCH "-binitfini" + +/* AIX 5.2 has the float and long double forms of math functions. 
*/ +#undef TARGET_C99_FUNCTIONS +#define TARGET_C99_FUNCTIONS 1 + +#ifndef _AIX52 +extern long long int atoll(const char *); +#endif + +/* This target uses the aix64.opt file. */ +#define TARGET_USES_AIX64_OPT 1 + +/* This target defines SUPPORTS_WEAK and TARGET_ASM_NAMED_SECTION, + but does not have crtbegin/end. */ + +#define TARGET_USE_JCR_SECTION 0 + +#define TARGET_AIX_VERSION 52 diff --git a/gcc/config/rs6000/aix53.h b/gcc/config/rs6000/aix53.h new file mode 100644 index 000000000..381e0d662 --- /dev/null +++ b/gcc/config/rs6000/aix53.h @@ -0,0 +1,199 @@ +/* Definitions of target machine for GNU compiler, + for IBM RS/6000 POWER running AIX V5.3. + Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 + Free Software Foundation, Inc. + Contributed by David Edelsohn (edelsohn@gnu.org). + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +/* The macro SUBTARGET_OVERRIDE_OPTIONS is provided for subtargets, to + get control in TARGET_OPTION_OVERRIDE. */ + +#define NON_POWERPC_MASKS (MASK_POWER | MASK_POWER2) +#define SUBTARGET_OVERRIDE_OPTIONS \ +do { \ + if (TARGET_64BIT && (target_flags & NON_POWERPC_MASKS)) \ + { \ + target_flags &= ~NON_POWERPC_MASKS; \ + warning (0, "-maix64 and POWER architecture are incompatible"); \ + } \ + if (TARGET_64BIT && ! 
TARGET_POWERPC64) \ + { \ + target_flags |= MASK_POWERPC64; \ + warning (0, "-maix64 requires PowerPC64 architecture remain enabled"); \ + } \ + if (TARGET_SOFT_FLOAT && TARGET_LONG_DOUBLE_128) \ + { \ + rs6000_long_double_type_size = 64; \ + if (rs6000_explicit_options.long_double) \ + warning (0, "soft-float and long-double-128 are incompatible"); \ + } \ + if (TARGET_POWERPC64 && ! TARGET_64BIT) \ + { \ + error ("-maix64 required: 64-bit computation with 32-bit addressing not yet supported"); \ + } \ +} while (0); + +#undef ASM_SPEC +#define ASM_SPEC "-u %{maix64:-a64 %{!mcpu*:-mppc64}} %(asm_cpu)" + +/* Common ASM definitions used by ASM_SPEC amongst the various targets for + handling -mcpu=xxx switches. There is a parallel list in driver-rs6000.c to + provide the default assembler options if the user uses -mcpu=native, so if + you make changes here, make them there also. */ +#undef ASM_CPU_SPEC +#define ASM_CPU_SPEC \ +"%{!mcpu*: %{!maix64: \ + %{mpowerpc64: -mppc64} \ + %{maltivec: -m970} \ + %{!maltivec: %{!mpower64: %(asm_default)}}}} \ +%{mcpu=native: %(asm_cpu_native)} \ +%{mcpu=power3: -m620} \ +%{mcpu=power4: -mpwr4} \ +%{mcpu=power5: -mpwr5} \ +%{mcpu=power5+: -mpwr5x} \ +%{mcpu=power6: -mpwr6} \ +%{mcpu=power6x: -mpwr6} \ +%{mcpu=power7: -mpwr7} \ +%{mcpu=powerpc: -mppc} \ +%{mcpu=rs64a: -mppc} \ +%{mcpu=603: -m603} \ +%{mcpu=603e: -m603} \ +%{mcpu=604: -m604} \ +%{mcpu=604e: -m604} \ +%{mcpu=620: -m620} \ +%{mcpu=630: -m620} \ +%{mcpu=970: -m970} \ +%{mcpu=G5: -m970}" + +#undef ASM_DEFAULT_SPEC +#define ASM_DEFAULT_SPEC "-mppc" + +#undef TARGET_OS_CPP_BUILTINS +#define TARGET_OS_CPP_BUILTINS() \ + do \ + { \ + builtin_define ("_AIX43"); \ + builtin_define ("_AIX51"); \ + builtin_define ("_AIX52"); \ + builtin_define ("_AIX53"); \ + TARGET_OS_AIX_CPP_BUILTINS (); \ + } \ + while (0) + +#undef CPP_SPEC +#define CPP_SPEC "%{posix: -D_POSIX_SOURCE} \ + %{ansi: -D_ANSI_C_SOURCE} \ + %{maix64: -D__64BIT__} \ + %{mpe: -I%R/usr/lpp/ppe.poe/include} \ + 
%{pthread: -D_THREAD_SAFE}" + +/* The GNU C++ standard library requires that these macros be + defined. Synchronize with libstdc++ os_defines.h. */ +#undef CPLUSPLUS_CPP_SPEC +#define CPLUSPLUS_CPP_SPEC \ + "-D_ALL_SOURCE \ + %{maix64: -D__64BIT__} \ + %{mpe: -I%R/usr/lpp/ppe.poe/include} \ + %{pthread: -D_THREAD_SAFE}" + +#undef TARGET_DEFAULT +#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS) + +#undef PROCESSOR_DEFAULT +#define PROCESSOR_DEFAULT PROCESSOR_POWER5 +#undef PROCESSOR_DEFAULT64 +#define PROCESSOR_DEFAULT64 PROCESSOR_POWER5 + +#undef TARGET_POWER +#define TARGET_POWER 0 + +/* Define this macro as a C expression for the initializer of an + array of string to tell the driver program which options are + defaults for this target and thus do not need to be handled + specially when using `MULTILIB_OPTIONS'. + + Do not define this macro if `MULTILIB_OPTIONS' is not defined in + the target makefile fragment or if none of the options listed in + `MULTILIB_OPTIONS' are set by default. *Note Target Fragment::. */ + +#undef MULTILIB_DEFAULTS + +#undef LIB_SPEC +#define LIB_SPEC "%{pg:-L%R/lib/profiled -L%R/usr/lib/profiled}\ + %{p:-L%R/lib/profiled -L%R/usr/lib/profiled}\ + %{!maix64:%{!shared:%{g*:-lg}}}\ + %{mpe:-L%R/usr/lpp/ppe.poe/lib -lmpi -lvtd}\ + %{pthread:-lpthreads} -lc" + +#undef LINK_SPEC +#define LINK_SPEC "-bpT:0x10000000 -bpD:0x20000000 %{!r:-btextro} -bnodelcsect\ + %{static:-bnso %(link_syscalls) } %{shared:-bM:SRE %{!e:-bnoentry}}\ + %{!maix64:%{!shared:%{g*: %(link_libg) }}} %{maix64:-b64}\ + %{mpe:-binitfini:poe_remote_main}" + +#undef STARTFILE_SPEC +#define STARTFILE_SPEC "%{!shared:\ + %{maix64:%{pg:gcrt0_64%O%s}%{!pg:%{p:mcrt0_64%O%s}%{!p:crt0_64%O%s}}}\ + %{!maix64:\ + %{pthread:%{pg:gcrt0_r%O%s}%{!pg:%{p:mcrt0_r%O%s}%{!p:crt0_r%O%s}}}\ + %{!pthread:%{pg:gcrt0%O%s}%{!pg:%{p:mcrt0%O%s}%{!p:crt0%O%s}}}}}" + +/* AIX V5 typedefs ptrdiff_t as "long" while earlier releases used "int". 
*/ + +#undef PTRDIFF_TYPE +#define PTRDIFF_TYPE "long int" + +/* Type used for wchar_t, as a string used in a declaration. */ +#undef WCHAR_TYPE +#define WCHAR_TYPE (!TARGET_64BIT ? "short unsigned int" : "unsigned int") + +/* Width of wchar_t in bits. */ +#undef WCHAR_TYPE_SIZE +#define WCHAR_TYPE_SIZE (!TARGET_64BIT ? 16 : 32) + +/* AIX V5 uses PowerPC nop (ori 0,0,0) instruction as call glue for PowerPC + and "cror 31,31,31" for POWER architecture. */ + +#undef RS6000_CALL_GLUE +#define RS6000_CALL_GLUE "{cror 31,31,31|nop}" + +/* AIX 4.2 and above provides initialization and finalization function + support from linker command line. */ +#undef HAS_INIT_SECTION +#define HAS_INIT_SECTION + +#undef LD_INIT_SWITCH +#define LD_INIT_SWITCH "-binitfini" + +/* AIX 5.2 has the float and long double forms of math functions. */ +#undef TARGET_C99_FUNCTIONS +#define TARGET_C99_FUNCTIONS 1 + +#ifndef _AIX52 +extern long long int atoll(const char *); +#endif + +/* This target uses the aix64.opt file. */ +#define TARGET_USES_AIX64_OPT 1 + +/* This target defines SUPPORTS_WEAK and TARGET_ASM_NAMED_SECTION, + but does not have crtbegin/end. */ + +#define TARGET_USE_JCR_SECTION 0 + +#define TARGET_AIX_VERSION 53 diff --git a/gcc/config/rs6000/aix61.h b/gcc/config/rs6000/aix61.h new file mode 100644 index 000000000..2170eae55 --- /dev/null +++ b/gcc/config/rs6000/aix61.h @@ -0,0 +1,200 @@ +/* Definitions of target machine for GNU compiler, + for IBM RS/6000 POWER running AIX V6.1. + Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 + Free Software Foundation, Inc. + Contributed by David Edelsohn (edelsohn@gnu.org). + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. 
+ + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +/* The macro SUBTARGET_OVERRIDE_OPTIONS is provided for subtargets, to + get control in TARGET_OPTION_OVERRIDE. */ + +#define NON_POWERPC_MASKS (MASK_POWER | MASK_POWER2) +#define SUBTARGET_OVERRIDE_OPTIONS \ +do { \ + if (TARGET_64BIT && (target_flags & NON_POWERPC_MASKS)) \ + { \ + target_flags &= ~NON_POWERPC_MASKS; \ + warning (0, "-maix64 and POWER architecture are incompatible"); \ + } \ + if (TARGET_64BIT && ! TARGET_POWERPC64) \ + { \ + target_flags |= MASK_POWERPC64; \ + warning (0, "-maix64 requires PowerPC64 architecture remain enabled"); \ + } \ + if (TARGET_SOFT_FLOAT && TARGET_LONG_DOUBLE_128) \ + { \ + rs6000_long_double_type_size = 64; \ + if (rs6000_explicit_options.long_double) \ + warning (0, "soft-float and long-double-128 are incompatible"); \ + } \ + if (TARGET_POWERPC64 && ! TARGET_64BIT) \ + { \ + error ("-maix64 required: 64-bit computation with 32-bit addressing not yet supported"); \ + } \ +} while (0); + +#undef ASM_SPEC +#define ASM_SPEC "-u %{maix64:-a64 %{!mcpu*:-mppc64}} %(asm_cpu)" + +/* Common ASM definitions used by ASM_SPEC amongst the various targets for + handling -mcpu=xxx switches. There is a parallel list in driver-rs6000.c to + provide the default assembler options if the user uses -mcpu=native, so if + you make changes here, make them there also. 
*/ +#undef ASM_CPU_SPEC +#define ASM_CPU_SPEC \ +"%{!mcpu*: %{!maix64: \ + %{mpowerpc64: -mppc64} \ + %{maltivec: -m970} \ + %{!maltivec: %{!mpower64: %(asm_default)}}}} \ +%{mcpu=native: %(asm_cpu_native)} \ +%{mcpu=power3: -m620} \ +%{mcpu=power4: -mpwr4} \ +%{mcpu=power5: -mpwr5} \ +%{mcpu=power5+: -mpwr5x} \ +%{mcpu=power6: -mpwr6} \ +%{mcpu=power6x: -mpwr6} \ +%{mcpu=power7: -mpwr7} \ +%{mcpu=powerpc: -mppc} \ +%{mcpu=rs64a: -mppc} \ +%{mcpu=603: -m603} \ +%{mcpu=603e: -m603} \ +%{mcpu=604: -m604} \ +%{mcpu=604e: -m604} \ +%{mcpu=620: -m620} \ +%{mcpu=630: -m620} \ +%{mcpu=970: -m970} \ +%{mcpu=G5: -m970}" + +#undef ASM_DEFAULT_SPEC +#define ASM_DEFAULT_SPEC "-mppc" + +#undef TARGET_OS_CPP_BUILTINS +#define TARGET_OS_CPP_BUILTINS() \ + do \ + { \ + builtin_define ("_AIX43"); \ + builtin_define ("_AIX51"); \ + builtin_define ("_AIX52"); \ + builtin_define ("_AIX53"); \ + builtin_define ("_AIX61"); \ + TARGET_OS_AIX_CPP_BUILTINS (); \ + } \ + while (0) + +#undef CPP_SPEC +#define CPP_SPEC "%{posix: -D_POSIX_SOURCE} \ + %{ansi: -D_ANSI_C_SOURCE} \ + %{maix64: -D__64BIT__} \ + %{mpe: -I%R/usr/lpp/ppe.poe/include} \ + %{pthread: -D_THREAD_SAFE}" + +/* The GNU C++ standard library requires that these macros be + defined. Synchronize with libstdc++ os_defines.h. */ +#undef CPLUSPLUS_CPP_SPEC +#define CPLUSPLUS_CPP_SPEC \ + "-D_ALL_SOURCE -D__COMPATMATH__ \ + %{maix64: -D__64BIT__} \ + %{mpe: -I%R/usr/lpp/ppe.poe/include} \ + %{pthread: -D_THREAD_SAFE}" + +#undef TARGET_DEFAULT +#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS) + +#undef PROCESSOR_DEFAULT +#define PROCESSOR_DEFAULT PROCESSOR_POWER7 +#undef PROCESSOR_DEFAULT64 +#define PROCESSOR_DEFAULT64 PROCESSOR_POWER7 + +#undef TARGET_POWER +#define TARGET_POWER 0 + +/* Define this macro as a C expression for the initializer of an + array of string to tell the driver program which options are + defaults for this target and thus do not need to be handled + specially when using `MULTILIB_OPTIONS'. 
+ + Do not define this macro if `MULTILIB_OPTIONS' is not defined in + the target makefile fragment or if none of the options listed in + `MULTILIB_OPTIONS' are set by default. *Note Target Fragment::. */ + +#undef MULTILIB_DEFAULTS + +#undef LIB_SPEC +#define LIB_SPEC "%{pg:-L%R/lib/profiled -L%R/usr/lib/profiled}\ + %{p:-L%R/lib/profiled -L%R/usr/lib/profiled}\ + %{!maix64:%{!shared:%{g*:-lg}}}\ + %{mpe:-L%R/usr/lpp/ppe.poe/lib -lmpi -lvtd}\ + %{pthread:-lpthreads} -lc" + +#undef LINK_SPEC +#define LINK_SPEC "-bpT:0x10000000 -bpD:0x20000000 %{!r:-btextro} -bnodelcsect\ + %{static:-bnso %(link_syscalls) } %{shared:-bM:SRE %{!e:-bnoentry}}\ + %{!maix64:%{!shared:%{g*: %(link_libg) }}} %{maix64:-b64}\ + %{mpe:-binitfini:poe_remote_main}" + +#undef STARTFILE_SPEC +#define STARTFILE_SPEC "%{!shared:\ + %{maix64:%{pg:gcrt0_64%O%s}%{!pg:%{p:mcrt0_64%O%s}%{!p:crt0_64%O%s}}}\ + %{!maix64:\ + %{pthread:%{pg:gcrt0_r%O%s}%{!pg:%{p:mcrt0_r%O%s}%{!p:crt0_r%O%s}}}\ + %{!pthread:%{pg:gcrt0%O%s}%{!pg:%{p:mcrt0%O%s}%{!p:crt0%O%s}}}}}" + +/* AIX V5 typedefs ptrdiff_t as "long" while earlier releases used "int". */ + +#undef PTRDIFF_TYPE +#define PTRDIFF_TYPE "long int" + +/* Type used for wchar_t, as a string used in a declaration. */ +#undef WCHAR_TYPE +#define WCHAR_TYPE (!TARGET_64BIT ? "short unsigned int" : "unsigned int") + +/* Width of wchar_t in bits. */ +#undef WCHAR_TYPE_SIZE +#define WCHAR_TYPE_SIZE (!TARGET_64BIT ? 16 : 32) + +/* AIX V5 uses PowerPC nop (ori 0,0,0) instruction as call glue for PowerPC + and "cror 31,31,31" for POWER architecture. */ + +#undef RS6000_CALL_GLUE +#define RS6000_CALL_GLUE "{cror 31,31,31|nop}" + +/* AIX 4.2 and above provides initialization and finalization function + support from linker command line. */ +#undef HAS_INIT_SECTION +#define HAS_INIT_SECTION + +#undef LD_INIT_SWITCH +#define LD_INIT_SWITCH "-binitfini" + +/* AIX 5.2 has the float and long double forms of math functions. 
*/ +#undef TARGET_C99_FUNCTIONS +#define TARGET_C99_FUNCTIONS 1 + +#ifndef _AIX52 +extern long long int atoll(const char *); +#endif + +/* This target uses the aix64.opt file. */ +#define TARGET_USES_AIX64_OPT 1 + +/* This target defines SUPPORTS_WEAK and TARGET_ASM_NAMED_SECTION, + but does not have crtbegin/end. */ + +#define TARGET_USE_JCR_SECTION 0 + +#define TARGET_AIX_VERSION 61 diff --git a/gcc/config/rs6000/aix64.opt b/gcc/config/rs6000/aix64.opt new file mode 100644 index 000000000..9a10b200e --- /dev/null +++ b/gcc/config/rs6000/aix64.opt @@ -0,0 +1,38 @@ +; Options for the 64-bit flavor of AIX. +; +; Copyright (C) 2005, 2007, 2010, 2011 Free Software Foundation, Inc. +; Contributed by Aldy Hernandez . +; +; This file is part of GCC. +; +; GCC is free software; you can redistribute it and/or modify it under +; the terms of the GNU General Public License as published by the Free +; Software Foundation; either version 3, or (at your option) any later +; version. +; +; GCC is distributed in the hope that it will be useful, but WITHOUT +; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +; License for more details. +; +; You should have received a copy of the GNU General Public License +; along with GCC; see the file COPYING3. If not see +; . + +maix64 +Target Report RejectNegative Negative(maix32) Mask(64BIT) +Compile for 64-bit pointers + +maix32 +Target Report RejectNegative Negative(maix64) InverseMask(64BIT) +Compile for 32-bit pointers + +mpe +Target Report RejectNegative Var(internal_nothing_1) Save +Support message passing with the Parallel Environment + +posix +Driver + +pthread +Driver diff --git a/gcc/config/rs6000/altivec.h b/gcc/config/rs6000/altivec.h new file mode 100644 index 000000000..583731b96 --- /dev/null +++ b/gcc/config/rs6000/altivec.h @@ -0,0 +1,493 @@ +/* PowerPC AltiVec include file. 
+ Copyright (C) 2002, 2003, 2004, 2005, 2008, 2009, 2010, 2011 + Free Software Foundation, Inc. + Contributed by Aldy Hernandez (aldyh@redhat.com). + Rewritten by Paolo Bonzini (bonzini@gnu.org). + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* Implemented to conform to the specification included in the AltiVec + Technology Programming Interface Manual (ALTIVECPIM/D 6/1999 Rev 0). */ + +#ifndef _ALTIVEC_H +#define _ALTIVEC_H 1 + +#if !defined(__VEC__) || !defined(__ALTIVEC__) +#error Use the "-maltivec" flag to enable PowerPC AltiVec support +#endif + +/* If __APPLE_ALTIVEC__ is defined, the compiler supports 'vector', + 'pixel' and 'bool' as context-sensitive AltiVec keywords (in + non-AltiVec contexts, they revert to their original meanings, + if any), so we do not need to define them as macros. */ + +#if !defined(__APPLE_ALTIVEC__) +/* You are allowed to undef these for C++ compatibility. */ +#define vector __vector +#define pixel __pixel +#define bool __bool +#endif + +/* Condition register codes for AltiVec predicates. 
*/ + +#define __CR6_EQ 0 +#define __CR6_EQ_REV 1 +#define __CR6_LT 2 +#define __CR6_LT_REV 3 + +/* Synonyms. */ +#define vec_vaddcuw vec_addc +#define vec_vand vec_and +#define vec_vandc vec_andc +#define vec_vrfip vec_ceil +#define vec_vcmpbfp vec_cmpb +#define vec_vcmpgefp vec_cmpge +#define vec_vctsxs vec_cts +#define vec_vctuxs vec_ctu +#define vec_vexptefp vec_expte +#define vec_vrfim vec_floor +#define vec_lvx vec_ld +#define vec_lvxl vec_ldl +#define vec_vlogefp vec_loge +#define vec_vmaddfp vec_madd +#define vec_vmhaddshs vec_madds +#define vec_vmladduhm vec_mladd +#define vec_vmhraddshs vec_mradds +#define vec_vnmsubfp vec_nmsub +#define vec_vnor vec_nor +#define vec_vor vec_or +#define vec_vpkpx vec_packpx +#define vec_vperm vec_perm +#define vec_vrefp vec_re +#define vec_vrfin vec_round +#define vec_vrsqrtefp vec_rsqrte +#define vec_vsel vec_sel +#define vec_vsldoi vec_sld +#define vec_vsl vec_sll +#define vec_vslo vec_slo +#define vec_vspltisb vec_splat_s8 +#define vec_vspltish vec_splat_s16 +#define vec_vspltisw vec_splat_s32 +#define vec_vsr vec_srl +#define vec_vsro vec_sro +#define vec_stvx vec_st +#define vec_stvxl vec_stl +#define vec_vsubcuw vec_subc +#define vec_vsum2sws vec_sum2s +#define vec_vsumsws vec_sums +#define vec_vrfiz vec_trunc +#define vec_vxor vec_xor + +/* Functions that are resolved by the backend to one of the + typed builtins. 
*/ +#define vec_vaddfp __builtin_vec_vaddfp +#define vec_addc __builtin_vec_addc +#define vec_vaddsws __builtin_vec_vaddsws +#define vec_vaddshs __builtin_vec_vaddshs +#define vec_vaddsbs __builtin_vec_vaddsbs +#define vec_vavgsw __builtin_vec_vavgsw +#define vec_vavguw __builtin_vec_vavguw +#define vec_vavgsh __builtin_vec_vavgsh +#define vec_vavguh __builtin_vec_vavguh +#define vec_vavgsb __builtin_vec_vavgsb +#define vec_vavgub __builtin_vec_vavgub +#define vec_ceil __builtin_vec_ceil +#define vec_cmpb __builtin_vec_cmpb +#define vec_vcmpeqfp __builtin_vec_vcmpeqfp +#define vec_cmpge __builtin_vec_cmpge +#define vec_vcmpgtfp __builtin_vec_vcmpgtfp +#define vec_vcmpgtsw __builtin_vec_vcmpgtsw +#define vec_vcmpgtuw __builtin_vec_vcmpgtuw +#define vec_vcmpgtsh __builtin_vec_vcmpgtsh +#define vec_vcmpgtuh __builtin_vec_vcmpgtuh +#define vec_vcmpgtsb __builtin_vec_vcmpgtsb +#define vec_vcmpgtub __builtin_vec_vcmpgtub +#define vec_vcfsx __builtin_vec_vcfsx +#define vec_vcfux __builtin_vec_vcfux +#define vec_cts __builtin_vec_cts +#define vec_ctu __builtin_vec_ctu +#define vec_expte __builtin_vec_expte +#define vec_floor __builtin_vec_floor +#define vec_loge __builtin_vec_loge +#define vec_madd __builtin_vec_madd +#define vec_madds __builtin_vec_madds +#define vec_mtvscr __builtin_vec_mtvscr +#define vec_vmaxfp __builtin_vec_vmaxfp +#define vec_vmaxsw __builtin_vec_vmaxsw +#define vec_vmaxsh __builtin_vec_vmaxsh +#define vec_vmaxsb __builtin_vec_vmaxsb +#define vec_vminfp __builtin_vec_vminfp +#define vec_vminsw __builtin_vec_vminsw +#define vec_vminsh __builtin_vec_vminsh +#define vec_vminsb __builtin_vec_vminsb +#define vec_mradds __builtin_vec_mradds +#define vec_vmsumshm __builtin_vec_vmsumshm +#define vec_vmsumuhm __builtin_vec_vmsumuhm +#define vec_vmsummbm __builtin_vec_vmsummbm +#define vec_vmsumubm __builtin_vec_vmsumubm +#define vec_vmsumshs __builtin_vec_vmsumshs +#define vec_vmsumuhs __builtin_vec_vmsumuhs +#define vec_vmulesb __builtin_vec_vmulesb +#define 
vec_vmulesh __builtin_vec_vmulesh +#define vec_vmuleuh __builtin_vec_vmuleuh +#define vec_vmuleub __builtin_vec_vmuleub +#define vec_vmulosh __builtin_vec_vmulosh +#define vec_vmulouh __builtin_vec_vmulouh +#define vec_vmulosb __builtin_vec_vmulosb +#define vec_vmuloub __builtin_vec_vmuloub +#define vec_nmsub __builtin_vec_nmsub +#define vec_packpx __builtin_vec_packpx +#define vec_vpkswss __builtin_vec_vpkswss +#define vec_vpkuwus __builtin_vec_vpkuwus +#define vec_vpkshss __builtin_vec_vpkshss +#define vec_vpkuhus __builtin_vec_vpkuhus +#define vec_vpkswus __builtin_vec_vpkswus +#define vec_vpkshus __builtin_vec_vpkshus +#define vec_re __builtin_vec_re +#define vec_round __builtin_vec_round +#define vec_recipdiv __builtin_vec_recipdiv +#define vec_rsqrt __builtin_vec_rsqrt +#define vec_rsqrte __builtin_vec_rsqrte +#define vec_vsubfp __builtin_vec_vsubfp +#define vec_subc __builtin_vec_subc +#define vec_vsubsws __builtin_vec_vsubsws +#define vec_vsubshs __builtin_vec_vsubshs +#define vec_vsubsbs __builtin_vec_vsubsbs +#define vec_sum4s __builtin_vec_sum4s +#define vec_vsum4shs __builtin_vec_vsum4shs +#define vec_vsum4sbs __builtin_vec_vsum4sbs +#define vec_vsum4ubs __builtin_vec_vsum4ubs +#define vec_sum2s __builtin_vec_sum2s +#define vec_sums __builtin_vec_sums +#define vec_trunc __builtin_vec_trunc +#define vec_vupkhpx __builtin_vec_vupkhpx +#define vec_vupkhsh __builtin_vec_vupkhsh +#define vec_vupkhsb __builtin_vec_vupkhsb +#define vec_vupklpx __builtin_vec_vupklpx +#define vec_vupklsh __builtin_vec_vupklsh +#define vec_vupklsb __builtin_vec_vupklsb +#define vec_abs __builtin_vec_abs +#define vec_abss __builtin_vec_abss +#define vec_add __builtin_vec_add +#define vec_adds __builtin_vec_adds +#define vec_and __builtin_vec_and +#define vec_andc __builtin_vec_andc +#define vec_avg __builtin_vec_avg +#define vec_cmpeq __builtin_vec_cmpeq +#define vec_cmpgt __builtin_vec_cmpgt +#define vec_ctf __builtin_vec_ctf +#define vec_dst __builtin_vec_dst +#define vec_dstst 
__builtin_vec_dstst +#define vec_dststt __builtin_vec_dststt +#define vec_dstt __builtin_vec_dstt +#define vec_ld __builtin_vec_ld +#define vec_lde __builtin_vec_lde +#define vec_ldl __builtin_vec_ldl +#define vec_lvebx __builtin_vec_lvebx +#define vec_lvehx __builtin_vec_lvehx +#define vec_lvewx __builtin_vec_lvewx +/* Cell only intrinsics. */ +#ifdef __PPU__ +#define vec_lvlx __builtin_vec_lvlx +#define vec_lvlxl __builtin_vec_lvlxl +#define vec_lvrx __builtin_vec_lvrx +#define vec_lvrxl __builtin_vec_lvrxl +#endif +#define vec_lvsl __builtin_vec_lvsl +#define vec_lvsr __builtin_vec_lvsr +#define vec_max __builtin_vec_max +#define vec_mergeh __builtin_vec_mergeh +#define vec_mergel __builtin_vec_mergel +#define vec_min __builtin_vec_min +#define vec_mladd __builtin_vec_mladd +#define vec_msum __builtin_vec_msum +#define vec_msums __builtin_vec_msums +#define vec_mule __builtin_vec_mule +#define vec_mulo __builtin_vec_mulo +#define vec_nor __builtin_vec_nor +#define vec_or __builtin_vec_or +#define vec_pack __builtin_vec_pack +#define vec_packs __builtin_vec_packs +#define vec_packsu __builtin_vec_packsu +#define vec_perm __builtin_vec_perm +#define vec_rl __builtin_vec_rl +#define vec_sel __builtin_vec_sel +#define vec_sl __builtin_vec_sl +#define vec_sld __builtin_vec_sld +#define vec_sll __builtin_vec_sll +#define vec_slo __builtin_vec_slo +#define vec_splat __builtin_vec_splat +#define vec_sr __builtin_vec_sr +#define vec_sra __builtin_vec_sra +#define vec_srl __builtin_vec_srl +#define vec_sro __builtin_vec_sro +#define vec_st __builtin_vec_st +#define vec_ste __builtin_vec_ste +#define vec_stl __builtin_vec_stl +#define vec_stvebx __builtin_vec_stvebx +#define vec_stvehx __builtin_vec_stvehx +#define vec_stvewx __builtin_vec_stvewx +/* Cell only intrinsics. 
*/ +#ifdef __PPU__ +#define vec_stvlx __builtin_vec_stvlx +#define vec_stvlxl __builtin_vec_stvlxl +#define vec_stvrx __builtin_vec_stvrx +#define vec_stvrxl __builtin_vec_stvrxl +#endif +#define vec_sub __builtin_vec_sub +#define vec_subs __builtin_vec_subs +#define vec_sum __builtin_vec_sum +#define vec_unpackh __builtin_vec_unpackh +#define vec_unpackl __builtin_vec_unpackl +#define vec_vaddubm __builtin_vec_vaddubm +#define vec_vaddubs __builtin_vec_vaddubs +#define vec_vadduhm __builtin_vec_vadduhm +#define vec_vadduhs __builtin_vec_vadduhs +#define vec_vadduwm __builtin_vec_vadduwm +#define vec_vadduws __builtin_vec_vadduws +#define vec_vcmpequb __builtin_vec_vcmpequb +#define vec_vcmpequh __builtin_vec_vcmpequh +#define vec_vcmpequw __builtin_vec_vcmpequw +#define vec_vmaxub __builtin_vec_vmaxub +#define vec_vmaxuh __builtin_vec_vmaxuh +#define vec_vmaxuw __builtin_vec_vmaxuw +#define vec_vminub __builtin_vec_vminub +#define vec_vminuh __builtin_vec_vminuh +#define vec_vminuw __builtin_vec_vminuw +#define vec_vmrghb __builtin_vec_vmrghb +#define vec_vmrghh __builtin_vec_vmrghh +#define vec_vmrghw __builtin_vec_vmrghw +#define vec_vmrglb __builtin_vec_vmrglb +#define vec_vmrglh __builtin_vec_vmrglh +#define vec_vmrglw __builtin_vec_vmrglw +#define vec_vpkuhum __builtin_vec_vpkuhum +#define vec_vpkuwum __builtin_vec_vpkuwum +#define vec_vrlb __builtin_vec_vrlb +#define vec_vrlh __builtin_vec_vrlh +#define vec_vrlw __builtin_vec_vrlw +#define vec_vslb __builtin_vec_vslb +#define vec_vslh __builtin_vec_vslh +#define vec_vslw __builtin_vec_vslw +#define vec_vspltb __builtin_vec_vspltb +#define vec_vsplth __builtin_vec_vsplth +#define vec_vspltw __builtin_vec_vspltw +#define vec_vsrab __builtin_vec_vsrab +#define vec_vsrah __builtin_vec_vsrah +#define vec_vsraw __builtin_vec_vsraw +#define vec_vsrb __builtin_vec_vsrb +#define vec_vsrh __builtin_vec_vsrh +#define vec_vsrw __builtin_vec_vsrw +#define vec_vsububs __builtin_vec_vsububs +#define vec_vsububm 
__builtin_vec_vsububm +#define vec_vsubuhm __builtin_vec_vsubuhm +#define vec_vsubuhs __builtin_vec_vsubuhs +#define vec_vsubuwm __builtin_vec_vsubuwm +#define vec_vsubuws __builtin_vec_vsubuws +#define vec_xor __builtin_vec_xor + +#define vec_extract __builtin_vec_extract +#define vec_insert __builtin_vec_insert +#define vec_splats __builtin_vec_splats +#define vec_promote __builtin_vec_promote + +#ifdef __VSX__ +/* VSX additions */ +#define vec_div __builtin_vec_div +#define vec_mul __builtin_vec_mul +#define vec_msub __builtin_vec_msub +#define vec_nmadd __builtin_vec_nmadd +#define vec_nearbyint __builtin_vec_nearbyint +#define vec_rint __builtin_vec_rint +#define vec_sqrt __builtin_vec_sqrt +#define vec_vsx_ld __builtin_vec_vsx_ld +#define vec_vsx_st __builtin_vec_vsx_st +#endif + +/* Predicates. + For C++, we use templates in order to allow non-parenthesized arguments. + For C, instead, we use macros since non-parenthesized arguments were + not allowed even in older GCC implementation of AltiVec. + + In the future, we may add more magic to the back-end, so that no + one- or two-argument macros are used. */ + +#ifdef __cplusplus__ +#define __altivec_unary_pred(NAME, CALL) \ +template int NAME (T a1) { return CALL; } + +#define __altivec_scalar_pred(NAME, CALL) \ +template int NAME (T a1, U a2) { return CALL; } + +/* Given the vec_step of a type, return the corresponding bool type. */ +template class __altivec_bool_ret { }; +template <> class __altivec_bool_ret <4> { + typedef __vector __bool int __ret; +}; +template <> class __altivec_bool_ret <8> { + typedef __vector __bool short __ret; +}; +template <> class __altivec_bool_ret <16> { + typedef __vector __bool char __ret; +}; + +/* Be very liberal in the pairs we accept. Mistakes such as passing + a `vector char' and `vector short' will be caught by the middle-end, + while any attempt to detect them here would produce hard to understand + error messages involving the implementation details of AltiVec. 
*/ +#define __altivec_binary_pred(NAME, CALL) \ +template \ +typename __altivec_bool_ret ::__ret \ +NAME (T a1, U a2) \ +{ \ + return CALL; \ +} + +__altivec_binary_pred(vec_cmplt, + __builtin_vec_cmpgt (a2, a1)) +__altivec_binary_pred(vec_cmple, + __builtin_vec_cmpge (a2, a1)) + +__altivec_scalar_pred(vec_all_in, + __builtin_altivec_vcmpbfp_p (__CR6_EQ, a1, a2)) +__altivec_scalar_pred(vec_any_out, + __builtin_altivec_vcmpbfp_p (__CR6_EQ_REV, a1, a2)) + +__altivec_unary_pred(vec_all_nan, + __builtin_altivec_vcmpeq_p (__CR6_EQ, a1, a1)) +__altivec_unary_pred(vec_any_nan, + __builtin_altivec_vcmpeq_p (__CR6_LT_REV, a1, a1)) + +__altivec_unary_pred(vec_all_numeric, + __builtin_altivec_vcmpeq_p (__CR6_LT, a1, a1)) +__altivec_unary_pred(vec_any_numeric, + __builtin_altivec_vcmpeq_p (__CR6_EQ_REV, a1, a1)) + +__altivec_scalar_pred(vec_all_eq, + __builtin_vec_vcmpeq_p (__CR6_LT, a1, a2)) +__altivec_scalar_pred(vec_all_ne, + __builtin_vec_vcmpeq_p (__CR6_EQ, a1, a2)) +__altivec_scalar_pred(vec_any_eq, + __builtin_vec_vcmpeq_p (__CR6_EQ_REV, a1, a2)) +__altivec_scalar_pred(vec_any_ne, + __builtin_vec_vcmpeq_p (__CR6_LT_REV, a1, a2)) + +__altivec_scalar_pred(vec_all_gt, + __builtin_vec_vcmpgt_p (__CR6_LT, a1, a2)) +__altivec_scalar_pred(vec_all_lt, + __builtin_vec_vcmpgt_p (__CR6_LT, a2, a1)) +__altivec_scalar_pred(vec_any_gt, + __builtin_vec_vcmpgt_p (__CR6_EQ_REV, a1, a2)) +__altivec_scalar_pred(vec_any_lt, + __builtin_vec_vcmpgt_p (__CR6_EQ_REV, a2, a1)) + +__altivec_scalar_pred(vec_all_ngt, + __builtin_altivec_vcmpgt_p (__CR6_EQ, a1, a2)) +__altivec_scalar_pred(vec_all_nlt, + __builtin_altivec_vcmpgt_p (__CR6_EQ, a2, a1)) +__altivec_scalar_pred(vec_any_ngt, + __builtin_altivec_vcmpgt_p (__CR6_LT_REV, a1, a2)) +__altivec_scalar_pred(vec_any_nlt, + __builtin_altivec_vcmpgt_p (__CR6_LT_REV, a2, a1)) + +/* __builtin_vec_vcmpge_p is vcmpgefp for floating-point vector types, + while for integer types it is converted to __builtin_vec_vcmpgt_p, + with inverted args and condition 
code. */ +__altivec_scalar_pred(vec_all_le, + __builtin_vec_vcmpge_p (__CR6_LT, a2, a1)) +__altivec_scalar_pred(vec_all_ge, + __builtin_vec_vcmpge_p (__CR6_LT, a1, a2)) +__altivec_scalar_pred(vec_any_le, + __builtin_vec_vcmpge_p (__CR6_EQ_REV, a2, a1)) +__altivec_scalar_pred(vec_any_ge, + __builtin_vec_vcmpge_p (__CR6_EQ_REV, a1, a2)) + +__altivec_scalar_pred(vec_all_nge, + __builtin_altivec_vcmpge_p (__CR6_EQ, a1, a2)) +__altivec_scalar_pred(vec_all_nle, + __builtin_altivec_vcmpge_p (__CR6_EQ, a2, a1)) +__altivec_scalar_pred(vec_any_nge, + __builtin_altivec_vcmpge_p (__CR6_LT_REV, a1, a2)) +__altivec_scalar_pred(vec_any_nle, + __builtin_altivec_vcmpge_p (__CR6_LT_REV, a2, a1)) + +#undef __altivec_scalar_pred +#undef __altivec_unary_pred +#undef __altivec_binary_pred +#else +#define vec_cmplt(a1, a2) __builtin_vec_cmpgt ((a2), (a1)) +#define vec_cmple(a1, a2) __builtin_vec_cmpge ((a2), (a1)) + +#define vec_all_in(a1, a2) __builtin_altivec_vcmpbfp_p (__CR6_EQ, (a1), (a2)) +#define vec_any_out(a1, a2) __builtin_altivec_vcmpbfp_p (__CR6_EQ_REV, (a1), (a2)) + +#define vec_all_nan(a1) __builtin_vec_vcmpeq_p (__CR6_EQ, (a1), (a1)) +#define vec_any_nan(a1) __builtin_vec_vcmpeq_p (__CR6_LT_REV, (a1), (a1)) + +#define vec_all_numeric(a1) __builtin_vec_vcmpeq_p (__CR6_LT, (a1), (a1)) +#define vec_any_numeric(a1) __builtin_vec_vcmpeq_p (__CR6_EQ_REV, (a1), (a1)) + +#define vec_all_eq(a1, a2) __builtin_vec_vcmpeq_p (__CR6_LT, (a1), (a2)) +#define vec_all_ne(a1, a2) __builtin_vec_vcmpeq_p (__CR6_EQ, (a1), (a2)) +#define vec_any_eq(a1, a2) __builtin_vec_vcmpeq_p (__CR6_EQ_REV, (a1), (a2)) +#define vec_any_ne(a1, a2) __builtin_vec_vcmpeq_p (__CR6_LT_REV, (a1), (a2)) + +#define vec_all_gt(a1, a2) __builtin_vec_vcmpgt_p (__CR6_LT, (a1), (a2)) +#define vec_all_lt(a1, a2) __builtin_vec_vcmpgt_p (__CR6_LT, (a2), (a1)) +#define vec_any_gt(a1, a2) __builtin_vec_vcmpgt_p (__CR6_EQ_REV, (a1), (a2)) +#define vec_any_lt(a1, a2) __builtin_vec_vcmpgt_p (__CR6_EQ_REV, (a2), (a1)) + +#define 
vec_all_ngt(a1, a2) __builtin_vec_vcmpgt_p (__CR6_EQ, (a1), (a2)) +#define vec_all_nlt(a1, a2) __builtin_vec_vcmpgt_p (__CR6_EQ, (a2), (a1)) +#define vec_any_ngt(a1, a2) __builtin_vec_vcmpgt_p (__CR6_LT_REV, (a1), (a2)) +#define vec_any_nlt(a1, a2) __builtin_vec_vcmpgt_p (__CR6_LT_REV, (a2), (a1)) + +/* __builtin_vec_vcmpge_p is vcmpgefp for floating-point vector types, + while for integer types it is converted to __builtin_vec_vcmpgt_p, + with inverted args and condition code. */ +#define vec_all_le(a1, a2) __builtin_vec_vcmpge_p (__CR6_LT, (a2), (a1)) +#define vec_all_ge(a1, a2) __builtin_vec_vcmpge_p (__CR6_LT, (a1), (a2)) +#define vec_any_le(a1, a2) __builtin_vec_vcmpge_p (__CR6_EQ_REV, (a2), (a1)) +#define vec_any_ge(a1, a2) __builtin_vec_vcmpge_p (__CR6_EQ_REV, (a1), (a2)) + +#define vec_all_nge(a1, a2) __builtin_vec_vcmpge_p (__CR6_EQ, (a1), (a2)) +#define vec_all_nle(a1, a2) __builtin_vec_vcmpge_p (__CR6_EQ, (a2), (a1)) +#define vec_any_nge(a1, a2) __builtin_vec_vcmpge_p (__CR6_LT_REV, (a1), (a2)) +#define vec_any_nle(a1, a2) __builtin_vec_vcmpge_p (__CR6_LT_REV, (a2), (a1)) +#endif + +/* These do not accept vectors, so they do not have a __builtin_vec_* + counterpart. */ +#define vec_dss(x) __builtin_altivec_dss((x)) +#define vec_dssall() __builtin_altivec_dssall () +#define vec_mfvscr() ((__vector unsigned short) __builtin_altivec_mfvscr ()) +#define vec_splat_s8(x) __builtin_altivec_vspltisb ((x)) +#define vec_splat_s16(x) __builtin_altivec_vspltish ((x)) +#define vec_splat_s32(x) __builtin_altivec_vspltisw ((x)) +#define vec_splat_u8(x) ((__vector unsigned char) vec_splat_s8 ((x))) +#define vec_splat_u16(x) ((__vector unsigned short) vec_splat_s16 ((x))) +#define vec_splat_u32(x) ((__vector unsigned int) vec_splat_s32 ((x))) + +/* This also accepts a type for its parameter, so it is not enough + to #define vec_step to __builtin_vec_step. 
*/ +#define vec_step(x) __builtin_vec_step (* (__typeof__ (x) *) 0) + +#endif /* _ALTIVEC_H */ diff --git a/gcc/config/rs6000/altivec.md b/gcc/config/rs6000/altivec.md new file mode 100644 index 000000000..9fbced173 --- /dev/null +++ b/gcc/config/rs6000/altivec.md @@ -0,0 +1,2749 @@ +;; AltiVec patterns. +;; Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 +;; Free Software Foundation, Inc. +;; Contributed by Aldy Hernandez (aldy@quesejoda.com) + +;; This file is part of GCC. + +;; GCC is free software; you can redistribute it and/or modify it +;; under the terms of the GNU General Public License as published +;; by the Free Software Foundation; either version 3, or (at your +;; option) any later version. + +;; GCC is distributed in the hope that it will be useful, but WITHOUT +;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +;; License for more details. + +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . 
+ +(define_constants + ;; 51-62 deleted + [(UNSPEC_VCMPBFP 64) + (UNSPEC_VMSUMU 65) + (UNSPEC_VMSUMM 66) + (UNSPEC_VMSUMSHM 68) + (UNSPEC_VMSUMUHS 69) + (UNSPEC_VMSUMSHS 70) + (UNSPEC_VMHADDSHS 71) + (UNSPEC_VMHRADDSHS 72) + (UNSPEC_VMLADDUHM 73) + (UNSPEC_VADDCUW 75) + (UNSPEC_VADDU 76) + (UNSPEC_VADDS 77) + (UNSPEC_VAVGU 80) + (UNSPEC_VAVGS 81) + (UNSPEC_VMULEUB 83) + (UNSPEC_VMULESB 84) + (UNSPEC_VMULEUH 85) + (UNSPEC_VMULESH 86) + (UNSPEC_VMULOUB 87) + (UNSPEC_VMULOSB 88) + (UNSPEC_VMULOUH 89) + (UNSPEC_VMULOSH 90) + (UNSPEC_VPKUHUM 93) + (UNSPEC_VPKUWUM 94) + (UNSPEC_VPKPX 95) + (UNSPEC_VPKSHSS 97) + (UNSPEC_VPKSWSS 99) + (UNSPEC_VPKUHUS 100) + (UNSPEC_VPKSHUS 101) + (UNSPEC_VPKUWUS 102) + (UNSPEC_VPKSWUS 103) + ;; 104 deleted + (UNSPEC_VSLV4SI 110) + (UNSPEC_VSLO 111) + (UNSPEC_VSR 118) + (UNSPEC_VSRO 119) + (UNSPEC_VSUBCUW 124) + (UNSPEC_VSUBU 125) + (UNSPEC_VSUBS 126) + (UNSPEC_VSUM4UBS 131) + (UNSPEC_VSUM4S 132) + (UNSPEC_VSUM2SWS 134) + (UNSPEC_VSUMSWS 135) + (UNSPEC_VPERM 144) + (UNSPEC_VPERM_UNS 145) + ;; 148 deleted + (UNSPEC_VRFIN 149) + ;; 150 deleted + (UNSPEC_VCFUX 151) + (UNSPEC_VCFSX 152) + (UNSPEC_VCTUXS 153) + (UNSPEC_VCTSXS 154) + (UNSPEC_VLOGEFP 155) + (UNSPEC_VEXPTEFP 156) + ;; 157-162 deleted + (UNSPEC_VLSDOI 163) + (UNSPEC_VUPKHSB 167) + (UNSPEC_VUPKHPX 168) + (UNSPEC_VUPKHSH 169) + (UNSPEC_VUPKLSB 170) + (UNSPEC_VUPKLPX 171) + (UNSPEC_VUPKLSH 172) + ;; 173 deleted + (UNSPEC_DST 190) + (UNSPEC_DSTT 191) + (UNSPEC_DSTST 192) + (UNSPEC_DSTSTT 193) + (UNSPEC_LVSL 194) + (UNSPEC_LVSR 195) + (UNSPEC_LVE 196) + (UNSPEC_STVX 201) + (UNSPEC_STVXL 202) + (UNSPEC_STVE 203) + (UNSPEC_SET_VSCR 213) + (UNSPEC_GET_VRSAVE 214) + (UNSPEC_LVX 215) + (UNSPEC_REDUC_PLUS 217) + (UNSPEC_VECSH 219) + (UNSPEC_EXTEVEN_V4SI 220) + (UNSPEC_EXTEVEN_V8HI 221) + (UNSPEC_EXTEVEN_V16QI 222) + (UNSPEC_EXTEVEN_V4SF 223) + (UNSPEC_EXTODD_V4SI 224) + (UNSPEC_EXTODD_V8HI 225) + (UNSPEC_EXTODD_V16QI 226) + (UNSPEC_EXTODD_V4SF 227) + (UNSPEC_INTERHI_V4SI 228) + 
(UNSPEC_INTERHI_V8HI 229) + (UNSPEC_INTERHI_V16QI 230) + ;; delete 231 + (UNSPEC_INTERLO_V4SI 232) + (UNSPEC_INTERLO_V8HI 233) + (UNSPEC_INTERLO_V16QI 234) + ;; delete 235 + (UNSPEC_LVLX 236) + (UNSPEC_LVLXL 237) + (UNSPEC_LVRX 238) + (UNSPEC_LVRXL 239) + (UNSPEC_STVLX 240) + (UNSPEC_STVLXL 241) + (UNSPEC_STVRX 242) + (UNSPEC_STVRXL 243) + (UNSPEC_VMULWHUB 308) + (UNSPEC_VMULWLUB 309) + (UNSPEC_VMULWHSB 310) + (UNSPEC_VMULWLSB 311) + (UNSPEC_VMULWHUH 312) + (UNSPEC_VMULWLUH 313) + (UNSPEC_VMULWHSH 314) + (UNSPEC_VMULWLSH 315) + (UNSPEC_VUPKHUB 316) + (UNSPEC_VUPKHUH 317) + (UNSPEC_VUPKLUB 318) + (UNSPEC_VUPKLUH 319) + (UNSPEC_VPERMSI 320) + (UNSPEC_VPERMHI 321) + (UNSPEC_INTERHI 322) + (UNSPEC_INTERLO 323) + (UNSPEC_VUPKHS_V4SF 324) + (UNSPEC_VUPKLS_V4SF 325) + (UNSPEC_VUPKHU_V4SF 326) + (UNSPEC_VUPKLU_V4SF 327) +]) + +(define_constants + [(UNSPECV_SET_VRSAVE 30) + (UNSPECV_MTVSCR 186) + (UNSPECV_MFVSCR 187) + (UNSPECV_DSSALL 188) + (UNSPECV_DSS 189) + ]) + +;; Vec int modes +(define_mode_iterator VI [V4SI V8HI V16QI]) +;; Short vec in modes +(define_mode_iterator VIshort [V8HI V16QI]) +;; Vec float modes +(define_mode_iterator VF [V4SF]) +;; Vec modes, pity mode iterators are not composable +(define_mode_iterator V [V4SI V8HI V16QI V4SF]) +;; Vec modes for move/logical/permute ops, include vector types for move not +;; otherwise handled by altivec (v2df, v2di, ti) +(define_mode_iterator VM [V4SI V8HI V16QI V4SF V2DF V2DI TI]) + +;; Like VM, except don't do TImode +(define_mode_iterator VM2 [V4SI V8HI V16QI V4SF V2DF V2DI]) + +(define_mode_attr VI_char [(V4SI "w") (V8HI "h") (V16QI "b")]) +(define_mode_attr VI_scalar [(V4SI "SI") (V8HI "HI") (V16QI "QI")]) + +;; Vector move instructions. 
+(define_insn "*altivec_mov" + [(set (match_operand:VM2 0 "nonimmediate_operand" "=Z,v,v,*o,*r,*r,v,v") + (match_operand:VM2 1 "input_operand" "v,Z,v,r,o,r,j,W"))] + "VECTOR_MEM_ALTIVEC_P (mode) + && (register_operand (operands[0], mode) + || register_operand (operands[1], mode))" +{ + switch (which_alternative) + { + case 0: return "stvx %1,%y0"; + case 1: return "lvx %0,%y1"; + case 2: return "vor %0,%1,%1"; + case 3: return "#"; + case 4: return "#"; + case 5: return "#"; + case 6: return "vxor %0,%0,%0"; + case 7: return output_vec_const_move (operands); + default: gcc_unreachable (); + } +} + [(set_attr "type" "vecstore,vecload,vecsimple,store,load,*,vecsimple,*")]) + +;; Unlike other altivec moves, allow the GPRs, since a normal use of TImode +;; is for unions. However for plain data movement, slightly favor the vector +;; loads +(define_insn "*altivec_movti" + [(set (match_operand:TI 0 "nonimmediate_operand" "=Z,v,v,?o,?r,?r,v,v") + (match_operand:TI 1 "input_operand" "v,Z,v,r,o,r,j,W"))] + "VECTOR_MEM_ALTIVEC_P (TImode) + && (register_operand (operands[0], TImode) + || register_operand (operands[1], TImode))" +{ + switch (which_alternative) + { + case 0: return "stvx %1,%y0"; + case 1: return "lvx %0,%y1"; + case 2: return "vor %0,%1,%1"; + case 3: return "#"; + case 4: return "#"; + case 5: return "#"; + case 6: return "vxor %0,%0,%0"; + case 7: return output_vec_const_move (operands); + default: gcc_unreachable (); + } +} + [(set_attr "type" "vecstore,vecload,vecsimple,store,load,*,vecsimple,*")]) + +;; Load up a vector with the most significant bit set by loading up -1 and +;; doing a shift left +(define_split + [(set (match_operand:VM 0 "altivec_register_operand" "") + (match_operand:VM 1 "easy_vector_constant_msb" ""))] + "VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode) && reload_completed" + [(const_int 0)] +{ + rtx dest = operands[0]; + enum machine_mode mode = GET_MODE (operands[0]); + rtvec v; + int i, num_elements; + + if (mode == V4SFmode) + { + mode = 
V4SImode; + dest = gen_lowpart (V4SImode, dest); + } + + num_elements = GET_MODE_NUNITS (mode); + v = rtvec_alloc (num_elements); + for (i = 0; i < num_elements; i++) + RTVEC_ELT (v, i) = constm1_rtx; + + emit_insn (gen_vec_initv4si (dest, gen_rtx_PARALLEL (mode, v))); + emit_insn (gen_rtx_SET (VOIDmode, dest, gen_rtx_ASHIFT (mode, dest, dest))); + DONE; +}) + +(define_split + [(set (match_operand:VM 0 "altivec_register_operand" "") + (match_operand:VM 1 "easy_vector_constant_add_self" ""))] + "VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode) && reload_completed" + [(set (match_dup 0) (match_dup 3)) + (set (match_dup 0) (match_dup 4))] +{ + rtx dup = gen_easy_altivec_constant (operands[1]); + rtx const_vec; + enum machine_mode op_mode = mode; + + /* Divide the operand of the resulting VEC_DUPLICATE, and use + simplify_rtx to make a CONST_VECTOR. */ + XEXP (dup, 0) = simplify_const_binary_operation (ASHIFTRT, QImode, + XEXP (dup, 0), const1_rtx); + const_vec = simplify_rtx (dup); + + if (op_mode == V4SFmode) + { + op_mode = V4SImode; + operands[0] = gen_lowpart (op_mode, operands[0]); + } + if (GET_MODE (const_vec) == op_mode) + operands[3] = const_vec; + else + operands[3] = gen_lowpart (op_mode, const_vec); + operands[4] = gen_rtx_PLUS (op_mode, operands[0], operands[0]); +}) + +(define_insn "get_vrsave_internal" + [(set (match_operand:SI 0 "register_operand" "=r") + (unspec:SI [(reg:SI 109)] UNSPEC_GET_VRSAVE))] + "TARGET_ALTIVEC" +{ + if (TARGET_MACHO) + return "mfspr %0,256"; + else + return "mfvrsave %0"; +} + [(set_attr "type" "*")]) + +(define_insn "*set_vrsave_internal" + [(match_parallel 0 "vrsave_operation" + [(set (reg:SI 109) + (unspec_volatile:SI [(match_operand:SI 1 "register_operand" "r") + (reg:SI 109)] UNSPECV_SET_VRSAVE))])] + "TARGET_ALTIVEC" +{ + if (TARGET_MACHO) + return "mtspr 256,%1"; + else + return "mtvrsave %1"; +} + [(set_attr "type" "*")]) + +(define_insn "*save_world" + [(match_parallel 0 "save_world_operation" + [(clobber (reg:SI 65)) + (use 
(match_operand:SI 1 "call_operand" "s"))])] + "TARGET_MACHO && (DEFAULT_ABI == ABI_DARWIN) && TARGET_32BIT" + "bl %z1" + [(set_attr "type" "branch") + (set_attr "length" "4")]) + +(define_insn "*restore_world" + [(match_parallel 0 "restore_world_operation" + [(return) + (use (reg:SI 65)) + (use (match_operand:SI 1 "call_operand" "s")) + (clobber (match_operand:SI 2 "gpc_reg_operand" "=r"))])] + "TARGET_MACHO && (DEFAULT_ABI == ABI_DARWIN) && TARGET_32BIT" + "b %z1") + +;; Simple binary operations. + +;; add +(define_insn "add3" + [(set (match_operand:VI 0 "register_operand" "=v") + (plus:VI (match_operand:VI 1 "register_operand" "v") + (match_operand:VI 2 "register_operand" "v")))] + "TARGET_ALTIVEC" + "vaddum %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +(define_insn "*altivec_addv4sf3" + [(set (match_operand:V4SF 0 "register_operand" "=v") + (plus:V4SF (match_operand:V4SF 1 "register_operand" "v") + (match_operand:V4SF 2 "register_operand" "v")))] + "VECTOR_UNIT_ALTIVEC_P (V4SFmode)" + "vaddfp %0,%1,%2" + [(set_attr "type" "vecfloat")]) + +(define_insn "altivec_vaddcuw" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v") + (match_operand:V4SI 2 "register_operand" "v")] + UNSPEC_VADDCUW))] + "TARGET_ALTIVEC" + "vaddcuw %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +(define_insn "altivec_vaddus" + [(set (match_operand:VI 0 "register_operand" "=v") + (unspec:VI [(match_operand:VI 1 "register_operand" "v") + (match_operand:VI 2 "register_operand" "v")] + UNSPEC_VADDU)) + (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))] + "TARGET_ALTIVEC" + "vaddus %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +(define_insn "altivec_vaddss" + [(set (match_operand:VI 0 "register_operand" "=v") + (unspec:VI [(match_operand:VI 1 "register_operand" "v") + (match_operand:VI 2 "register_operand" "v")] + UNSPEC_VADDS)) + (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))] + "TARGET_ALTIVEC" + 
"vaddss %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +;; sub +(define_insn "sub3" + [(set (match_operand:VI 0 "register_operand" "=v") + (minus:VI (match_operand:VI 1 "register_operand" "v") + (match_operand:VI 2 "register_operand" "v")))] + "TARGET_ALTIVEC" + "vsubum %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +(define_insn "*altivec_subv4sf3" + [(set (match_operand:V4SF 0 "register_operand" "=v") + (minus:V4SF (match_operand:V4SF 1 "register_operand" "v") + (match_operand:V4SF 2 "register_operand" "v")))] + "VECTOR_UNIT_ALTIVEC_P (V4SFmode)" + "vsubfp %0,%1,%2" + [(set_attr "type" "vecfloat")]) + +(define_insn "altivec_vsubcuw" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v") + (match_operand:V4SI 2 "register_operand" "v")] + UNSPEC_VSUBCUW))] + "TARGET_ALTIVEC" + "vsubcuw %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +(define_insn "altivec_vsubus" + [(set (match_operand:VI 0 "register_operand" "=v") + (unspec:VI [(match_operand:VI 1 "register_operand" "v") + (match_operand:VI 2 "register_operand" "v")] + UNSPEC_VSUBU)) + (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))] + "TARGET_ALTIVEC" + "vsubus %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +(define_insn "altivec_vsubss" + [(set (match_operand:VI 0 "register_operand" "=v") + (unspec:VI [(match_operand:VI 1 "register_operand" "v") + (match_operand:VI 2 "register_operand" "v")] + UNSPEC_VSUBS)) + (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))] + "TARGET_ALTIVEC" + "vsubss %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +;; +(define_insn "altivec_vavgu" + [(set (match_operand:VI 0 "register_operand" "=v") + (unspec:VI [(match_operand:VI 1 "register_operand" "v") + (match_operand:VI 2 "register_operand" "v")] + UNSPEC_VAVGU))] + "TARGET_ALTIVEC" + "vavgu %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +(define_insn "altivec_vavgs" + [(set (match_operand:VI 0 "register_operand" "=v") + (unspec:VI 
[(match_operand:VI 1 "register_operand" "v") + (match_operand:VI 2 "register_operand" "v")] + UNSPEC_VAVGS))] + "TARGET_ALTIVEC" + "vavgs %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +(define_insn "altivec_vcmpbfp" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v") + (match_operand:V4SF 2 "register_operand" "v")] + UNSPEC_VCMPBFP))] + "TARGET_ALTIVEC" + "vcmpbfp %0,%1,%2" + [(set_attr "type" "veccmp")]) + +(define_insn "*altivec_eq" + [(set (match_operand:VI 0 "altivec_register_operand" "=v") + (eq:VI (match_operand:VI 1 "altivec_register_operand" "v") + (match_operand:VI 2 "altivec_register_operand" "v")))] + "TARGET_ALTIVEC" + "vcmpequ %0,%1,%2" + [(set_attr "type" "veccmp")]) + +(define_insn "*altivec_gt" + [(set (match_operand:VI 0 "altivec_register_operand" "=v") + (gt:VI (match_operand:VI 1 "altivec_register_operand" "v") + (match_operand:VI 2 "altivec_register_operand" "v")))] + "TARGET_ALTIVEC" + "vcmpgts %0,%1,%2" + [(set_attr "type" "veccmp")]) + +(define_insn "*altivec_gtu" + [(set (match_operand:VI 0 "altivec_register_operand" "=v") + (gtu:VI (match_operand:VI 1 "altivec_register_operand" "v") + (match_operand:VI 2 "altivec_register_operand" "v")))] + "TARGET_ALTIVEC" + "vcmpgtu %0,%1,%2" + [(set_attr "type" "veccmp")]) + +(define_insn "*altivec_eqv4sf" + [(set (match_operand:V4SF 0 "altivec_register_operand" "=v") + (eq:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v") + (match_operand:V4SF 2 "altivec_register_operand" "v")))] + "VECTOR_UNIT_ALTIVEC_P (V4SFmode)" + "vcmpeqfp %0,%1,%2" + [(set_attr "type" "veccmp")]) + +(define_insn "*altivec_gtv4sf" + [(set (match_operand:V4SF 0 "altivec_register_operand" "=v") + (gt:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v") + (match_operand:V4SF 2 "altivec_register_operand" "v")))] + "VECTOR_UNIT_ALTIVEC_P (V4SFmode)" + "vcmpgtfp %0,%1,%2" + [(set_attr "type" "veccmp")]) + +(define_insn "*altivec_gev4sf" + [(set 
(match_operand:V4SF 0 "altivec_register_operand" "=v") + (ge:V4SF (match_operand:V4SF 1 "altivec_register_operand" "v") + (match_operand:V4SF 2 "altivec_register_operand" "v")))] + "VECTOR_UNIT_ALTIVEC_P (V4SFmode)" + "vcmpgefp %0,%1,%2" + [(set_attr "type" "veccmp")]) + +(define_insn "*altivec_vsel" + [(set (match_operand:VM 0 "altivec_register_operand" "=v") + (if_then_else:VM + (ne:CC (match_operand:VM 1 "altivec_register_operand" "v") + (match_operand:VM 4 "zero_constant" "")) + (match_operand:VM 2 "altivec_register_operand" "v") + (match_operand:VM 3 "altivec_register_operand" "v")))] + "VECTOR_MEM_ALTIVEC_P (mode)" + "vsel %0,%3,%2,%1" + [(set_attr "type" "vecperm")]) + +(define_insn "*altivec_vsel_uns" + [(set (match_operand:VM 0 "altivec_register_operand" "=v") + (if_then_else:VM + (ne:CCUNS (match_operand:VM 1 "altivec_register_operand" "v") + (match_operand:VM 4 "zero_constant" "")) + (match_operand:VM 2 "altivec_register_operand" "v") + (match_operand:VM 3 "altivec_register_operand" "v")))] + "VECTOR_MEM_ALTIVEC_P (mode)" + "vsel %0,%3,%2,%1" + [(set_attr "type" "vecperm")]) + +;; Fused multiply add. + +(define_insn "*altivec_fmav4sf4" + [(set (match_operand:V4SF 0 "register_operand" "=v") + (fma:V4SF (match_operand:V4SF 1 "register_operand" "v") + (match_operand:V4SF 2 "register_operand" "v") + (match_operand:V4SF 3 "register_operand" "v")))] + "VECTOR_UNIT_ALTIVEC_P (V4SFmode)" + "vmaddfp %0,%1,%2,%3" + [(set_attr "type" "vecfloat")]) + +;; We do multiply as a fused multiply-add with an add of a -0.0 vector. + +(define_expand "altivec_mulv4sf3" + [(set (match_operand:V4SF 0 "register_operand" "") + (fma:V4SF (match_operand:V4SF 1 "register_operand" "") + (match_operand:V4SF 2 "register_operand" "") + (match_dup 3)))] + "VECTOR_UNIT_ALTIVEC_P (V4SFmode)" +{ + rtx neg0; + + /* Generate [-0.0, -0.0, -0.0, -0.0]. 
*/ + neg0 = gen_reg_rtx (V4SImode); + emit_insn (gen_altivec_vspltisw (neg0, constm1_rtx)); + emit_insn (gen_vashlv4si3 (neg0, neg0, neg0)); + + operands[3] = gen_lowpart (V4SFmode, neg0); +}) + +;; 32-bit integer multiplication +;; A_high = Operand_0 & 0xFFFF0000 >> 16 +;; A_low = Operand_0 & 0xFFFF +;; B_high = Operand_1 & 0xFFFF0000 >> 16 +;; B_low = Operand_1 & 0xFFFF +;; result = A_low * B_low + (A_high * B_low + B_high * A_low) << 16 + +;; (define_insn "mulv4si3" +;; [(set (match_operand:V4SI 0 "register_operand" "=v") +;; (mult:V4SI (match_operand:V4SI 1 "register_operand" "v") +;; (match_operand:V4SI 2 "register_operand" "v")))] +(define_expand "mulv4si3" + [(use (match_operand:V4SI 0 "register_operand" "")) + (use (match_operand:V4SI 1 "register_operand" "")) + (use (match_operand:V4SI 2 "register_operand" ""))] + "TARGET_ALTIVEC" + " + { + rtx zero; + rtx swap; + rtx small_swap; + rtx sixteen; + rtx one; + rtx two; + rtx low_product; + rtx high_product; + + zero = gen_reg_rtx (V4SImode); + emit_insn (gen_altivec_vspltisw (zero, const0_rtx)); + + sixteen = gen_reg_rtx (V4SImode); + emit_insn (gen_altivec_vspltisw (sixteen, gen_rtx_CONST_INT (V4SImode, -16))); + + swap = gen_reg_rtx (V4SImode); + emit_insn (gen_vrotlv4si3 (swap, operands[2], sixteen)); + + one = gen_reg_rtx (V8HImode); + convert_move (one, operands[1], 0); + + two = gen_reg_rtx (V8HImode); + convert_move (two, operands[2], 0); + + small_swap = gen_reg_rtx (V8HImode); + convert_move (small_swap, swap, 0); + + low_product = gen_reg_rtx (V4SImode); + emit_insn (gen_altivec_vmulouh (low_product, one, two)); + + high_product = gen_reg_rtx (V4SImode); + emit_insn (gen_altivec_vmsumuhm (high_product, one, small_swap, zero)); + + emit_insn (gen_vashlv4si3 (high_product, high_product, sixteen)); + + emit_insn (gen_addv4si3 (operands[0], high_product, low_product)); + + DONE; + }") + +(define_expand "mulv8hi3" + [(use (match_operand:V8HI 0 "register_operand" "")) + (use (match_operand:V8HI 1 
"register_operand" "")) + (use (match_operand:V8HI 2 "register_operand" ""))] + "TARGET_ALTIVEC" + " +{ + rtx odd = gen_reg_rtx (V4SImode); + rtx even = gen_reg_rtx (V4SImode); + rtx high = gen_reg_rtx (V4SImode); + rtx low = gen_reg_rtx (V4SImode); + + emit_insn (gen_altivec_vmulesh (even, operands[1], operands[2])); + emit_insn (gen_altivec_vmulosh (odd, operands[1], operands[2])); + + emit_insn (gen_altivec_vmrghw (high, even, odd)); + emit_insn (gen_altivec_vmrglw (low, even, odd)); + + emit_insn (gen_altivec_vpkuwum (operands[0], high, low)); + + DONE; +}") + +;; Fused multiply subtract +(define_insn "*altivec_vnmsubfp" + [(set (match_operand:V4SF 0 "register_operand" "=v") + (neg:V4SF + (fma:V4SF (match_operand:V4SF 1 "register_operand" "v") + (match_operand:V4SF 2 "register_operand" "v") + (neg:V4SF + (match_operand:V4SF 3 "register_operand" "v")))))] + "VECTOR_UNIT_ALTIVEC_P (V4SFmode)" + "vnmsubfp %0,%1,%2,%3" + [(set_attr "type" "vecfloat")]) + +(define_insn "altivec_vmsumum" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v") + (match_operand:VIshort 2 "register_operand" "v") + (match_operand:V4SI 3 "register_operand" "v")] + UNSPEC_VMSUMU))] + "TARGET_ALTIVEC" + "vmsumum %0,%1,%2,%3" + [(set_attr "type" "veccomplex")]) + +(define_insn "altivec_vmsummm" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v") + (match_operand:VIshort 2 "register_operand" "v") + (match_operand:V4SI 3 "register_operand" "v")] + UNSPEC_VMSUMM))] + "TARGET_ALTIVEC" + "vmsummm %0,%1,%2,%3" + [(set_attr "type" "veccomplex")]) + +(define_insn "altivec_vmsumshm" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v") + (match_operand:V8HI 2 "register_operand" "v") + (match_operand:V4SI 3 "register_operand" "v")] + UNSPEC_VMSUMSHM))] + "TARGET_ALTIVEC" + "vmsumshm %0,%1,%2,%3" + [(set_attr 
"type" "veccomplex")]) + +(define_insn "altivec_vmsumuhs" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v") + (match_operand:V8HI 2 "register_operand" "v") + (match_operand:V4SI 3 "register_operand" "v")] + UNSPEC_VMSUMUHS)) + (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))] + "TARGET_ALTIVEC" + "vmsumuhs %0,%1,%2,%3" + [(set_attr "type" "veccomplex")]) + +(define_insn "altivec_vmsumshs" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v") + (match_operand:V8HI 2 "register_operand" "v") + (match_operand:V4SI 3 "register_operand" "v")] + UNSPEC_VMSUMSHS)) + (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))] + "TARGET_ALTIVEC" + "vmsumshs %0,%1,%2,%3" + [(set_attr "type" "veccomplex")]) + +;; max + +(define_insn "umax3" + [(set (match_operand:VI 0 "register_operand" "=v") + (umax:VI (match_operand:VI 1 "register_operand" "v") + (match_operand:VI 2 "register_operand" "v")))] + "TARGET_ALTIVEC" + "vmaxu %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +(define_insn "smax3" + [(set (match_operand:VI 0 "register_operand" "=v") + (smax:VI (match_operand:VI 1 "register_operand" "v") + (match_operand:VI 2 "register_operand" "v")))] + "TARGET_ALTIVEC" + "vmaxs %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +(define_insn "*altivec_smaxv4sf3" + [(set (match_operand:V4SF 0 "register_operand" "=v") + (smax:V4SF (match_operand:V4SF 1 "register_operand" "v") + (match_operand:V4SF 2 "register_operand" "v")))] + "VECTOR_UNIT_ALTIVEC_P (V4SFmode)" + "vmaxfp %0,%1,%2" + [(set_attr "type" "veccmp")]) + +(define_insn "umin3" + [(set (match_operand:VI 0 "register_operand" "=v") + (umin:VI (match_operand:VI 1 "register_operand" "v") + (match_operand:VI 2 "register_operand" "v")))] + "TARGET_ALTIVEC" + "vminu %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +(define_insn "smin3" + [(set (match_operand:VI 0 "register_operand" "=v") + 
(smin:VI (match_operand:VI 1 "register_operand" "v") + (match_operand:VI 2 "register_operand" "v")))] + "TARGET_ALTIVEC" + "vmins %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +(define_insn "*altivec_sminv4sf3" + [(set (match_operand:V4SF 0 "register_operand" "=v") + (smin:V4SF (match_operand:V4SF 1 "register_operand" "v") + (match_operand:V4SF 2 "register_operand" "v")))] + "VECTOR_UNIT_ALTIVEC_P (V4SFmode)" + "vminfp %0,%1,%2" + [(set_attr "type" "veccmp")]) + +(define_insn "altivec_vmhaddshs" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v") + (match_operand:V8HI 2 "register_operand" "v") + (match_operand:V8HI 3 "register_operand" "v")] + UNSPEC_VMHADDSHS)) + (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))] + "TARGET_ALTIVEC" + "vmhaddshs %0,%1,%2,%3" + [(set_attr "type" "veccomplex")]) + +(define_insn "altivec_vmhraddshs" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v") + (match_operand:V8HI 2 "register_operand" "v") + (match_operand:V8HI 3 "register_operand" "v")] + UNSPEC_VMHRADDSHS)) + (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))] + "TARGET_ALTIVEC" + "vmhraddshs %0,%1,%2,%3" + [(set_attr "type" "veccomplex")]) + +(define_insn "altivec_vmladduhm" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v") + (match_operand:V8HI 2 "register_operand" "v") + (match_operand:V8HI 3 "register_operand" "v")] + UNSPEC_VMLADDUHM))] + "TARGET_ALTIVEC" + "vmladduhm %0,%1,%2,%3" + [(set_attr "type" "veccomplex")]) + +(define_insn "altivec_vmrghb" + [(set (match_operand:V16QI 0 "register_operand" "=v") + (vec_merge:V16QI (vec_select:V16QI (match_operand:V16QI 1 "register_operand" "v") + (parallel [(const_int 0) + (const_int 8) + (const_int 1) + (const_int 9) + (const_int 2) + (const_int 10) + (const_int 3) + (const_int 11) + (const_int 4) + 
(const_int 12) + (const_int 5) + (const_int 13) + (const_int 6) + (const_int 14) + (const_int 7) + (const_int 15)])) + (vec_select:V16QI (match_operand:V16QI 2 "register_operand" "v") + (parallel [(const_int 8) + (const_int 0) + (const_int 9) + (const_int 1) + (const_int 10) + (const_int 2) + (const_int 11) + (const_int 3) + (const_int 12) + (const_int 4) + (const_int 13) + (const_int 5) + (const_int 14) + (const_int 6) + (const_int 15) + (const_int 7)])) + (const_int 21845)))] + "TARGET_ALTIVEC" + "vmrghb %0,%1,%2" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vmrghh" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (vec_merge:V8HI (vec_select:V8HI (match_operand:V8HI 1 "register_operand" "v") + (parallel [(const_int 0) + (const_int 4) + (const_int 1) + (const_int 5) + (const_int 2) + (const_int 6) + (const_int 3) + (const_int 7)])) + (vec_select:V8HI (match_operand:V8HI 2 "register_operand" "v") + (parallel [(const_int 4) + (const_int 0) + (const_int 5) + (const_int 1) + (const_int 6) + (const_int 2) + (const_int 7) + (const_int 3)])) + (const_int 85)))] + "TARGET_ALTIVEC" + "vmrghh %0,%1,%2" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vmrghw" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (vec_merge:V4SI (vec_select:V4SI (match_operand:V4SI 1 "register_operand" "v") + (parallel [(const_int 0) + (const_int 2) + (const_int 1) + (const_int 3)])) + (vec_select:V4SI (match_operand:V4SI 2 "register_operand" "v") + (parallel [(const_int 2) + (const_int 0) + (const_int 3) + (const_int 1)])) + (const_int 5)))] + "VECTOR_MEM_ALTIVEC_P (V4SImode)" + "vmrghw %0,%1,%2" + [(set_attr "type" "vecperm")]) + +(define_insn "*altivec_vmrghsf" + [(set (match_operand:V4SF 0 "register_operand" "=v") + (vec_merge:V4SF (vec_select:V4SF (match_operand:V4SF 1 "register_operand" "v") + (parallel [(const_int 0) + (const_int 2) + (const_int 1) + (const_int 3)])) + (vec_select:V4SF (match_operand:V4SF 2 "register_operand" "v") + (parallel 
[(const_int 2) + (const_int 0) + (const_int 3) + (const_int 1)])) + (const_int 5)))] + "VECTOR_MEM_ALTIVEC_P (V4SFmode)" + "vmrghw %0,%1,%2" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vmrglb" + [(set (match_operand:V16QI 0 "register_operand" "=v") + (vec_merge:V16QI (vec_select:V16QI (match_operand:V16QI 1 "register_operand" "v") + (parallel [(const_int 8) + (const_int 0) + (const_int 9) + (const_int 1) + (const_int 10) + (const_int 2) + (const_int 11) + (const_int 3) + (const_int 12) + (const_int 4) + (const_int 13) + (const_int 5) + (const_int 14) + (const_int 6) + (const_int 15) + (const_int 7)])) + (vec_select:V16QI (match_operand:V16QI 2 "register_operand" "v") + (parallel [(const_int 0) + (const_int 8) + (const_int 1) + (const_int 9) + (const_int 2) + (const_int 10) + (const_int 3) + (const_int 11) + (const_int 4) + (const_int 12) + (const_int 5) + (const_int 13) + (const_int 6) + (const_int 14) + (const_int 7) + (const_int 15)])) + (const_int 21845)))] + "TARGET_ALTIVEC" + "vmrglb %0,%1,%2" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vmrglh" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (vec_merge:V8HI (vec_select:V8HI (match_operand:V8HI 1 "register_operand" "v") + (parallel [(const_int 4) + (const_int 0) + (const_int 5) + (const_int 1) + (const_int 6) + (const_int 2) + (const_int 7) + (const_int 3)])) + (vec_select:V8HI (match_operand:V8HI 2 "register_operand" "v") + (parallel [(const_int 0) + (const_int 4) + (const_int 1) + (const_int 5) + (const_int 2) + (const_int 6) + (const_int 3) + (const_int 7)])) + (const_int 85)))] + "TARGET_ALTIVEC" + "vmrglh %0,%1,%2" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vmrglw" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (vec_merge:V4SI + (vec_select:V4SI (match_operand:V4SI 1 "register_operand" "v") + (parallel [(const_int 2) + (const_int 0) + (const_int 3) + (const_int 1)])) + (vec_select:V4SI (match_operand:V4SI 2 "register_operand" "v") + (parallel 
[(const_int 0) + (const_int 2) + (const_int 1) + (const_int 3)])) + (const_int 5)))] + "VECTOR_MEM_ALTIVEC_P (V4SImode)" + "vmrglw %0,%1,%2" + [(set_attr "type" "vecperm")]) + +(define_insn "*altivec_vmrglsf" + [(set (match_operand:V4SF 0 "register_operand" "=v") + (vec_merge:V4SF + (vec_select:V4SF (match_operand:V4SF 1 "register_operand" "v") + (parallel [(const_int 2) + (const_int 0) + (const_int 3) + (const_int 1)])) + (vec_select:V4SF (match_operand:V4SF 2 "register_operand" "v") + (parallel [(const_int 0) + (const_int 2) + (const_int 1) + (const_int 3)])) + (const_int 5)))] + "VECTOR_MEM_ALTIVEC_P (V4SFmode)" + "vmrglw %0,%1,%2" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vmuleub" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v") + (match_operand:V16QI 2 "register_operand" "v")] + UNSPEC_VMULEUB))] + "TARGET_ALTIVEC" + "vmuleub %0,%1,%2" + [(set_attr "type" "veccomplex")]) + +(define_insn "altivec_vmulesb" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v") + (match_operand:V16QI 2 "register_operand" "v")] + UNSPEC_VMULESB))] + "TARGET_ALTIVEC" + "vmulesb %0,%1,%2" + [(set_attr "type" "veccomplex")]) + +(define_insn "altivec_vmuleuh" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v") + (match_operand:V8HI 2 "register_operand" "v")] + UNSPEC_VMULEUH))] + "TARGET_ALTIVEC" + "vmuleuh %0,%1,%2" + [(set_attr "type" "veccomplex")]) + +(define_insn "altivec_vmulesh" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v") + (match_operand:V8HI 2 "register_operand" "v")] + UNSPEC_VMULESH))] + "TARGET_ALTIVEC" + "vmulesh %0,%1,%2" + [(set_attr "type" "veccomplex")]) + +(define_insn "altivec_vmuloub" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (unspec:V8HI [(match_operand:V16QI 1 
"register_operand" "v") + (match_operand:V16QI 2 "register_operand" "v")] + UNSPEC_VMULOUB))] + "TARGET_ALTIVEC" + "vmuloub %0,%1,%2" + [(set_attr "type" "veccomplex")]) + +(define_insn "altivec_vmulosb" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v") + (match_operand:V16QI 2 "register_operand" "v")] + UNSPEC_VMULOSB))] + "TARGET_ALTIVEC" + "vmulosb %0,%1,%2" + [(set_attr "type" "veccomplex")]) + +(define_insn "altivec_vmulouh" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v") + (match_operand:V8HI 2 "register_operand" "v")] + UNSPEC_VMULOUH))] + "TARGET_ALTIVEC" + "vmulouh %0,%1,%2" + [(set_attr "type" "veccomplex")]) + +(define_insn "altivec_vmulosh" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v") + (match_operand:V8HI 2 "register_operand" "v")] + UNSPEC_VMULOSH))] + "TARGET_ALTIVEC" + "vmulosh %0,%1,%2" + [(set_attr "type" "veccomplex")]) + + +;; logical ops. 
Have the logical ops follow the memory ops in +;; terms of whether to prefer VSX or Altivec + +(define_insn "*altivec_and<mode>3" + [(set (match_operand:VM 0 "register_operand" "=v") + (and:VM (match_operand:VM 1 "register_operand" "v") + (match_operand:VM 2 "register_operand" "v")))] + "VECTOR_MEM_ALTIVEC_P (<MODE>mode)" + "vand %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +(define_insn "*altivec_ior<mode>3" + [(set (match_operand:VM 0 "register_operand" "=v") + (ior:VM (match_operand:VM 1 "register_operand" "v") + (match_operand:VM 2 "register_operand" "v")))] + "VECTOR_MEM_ALTIVEC_P (<MODE>mode)" + "vor %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +(define_insn "*altivec_xor<mode>3" + [(set (match_operand:VM 0 "register_operand" "=v") + (xor:VM (match_operand:VM 1 "register_operand" "v") + (match_operand:VM 2 "register_operand" "v")))] + "VECTOR_MEM_ALTIVEC_P (<MODE>mode)" + "vxor %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +(define_insn "*altivec_one_cmpl<mode>2" + [(set (match_operand:VM 0 "register_operand" "=v") + (not:VM (match_operand:VM 1 "register_operand" "v")))] + "VECTOR_MEM_ALTIVEC_P (<MODE>mode)" + "vnor %0,%1,%1" + [(set_attr "type" "vecsimple")]) + +(define_insn "*altivec_nor<mode>3" + [(set (match_operand:VM 0 "register_operand" "=v") + (not:VM (ior:VM (match_operand:VM 1 "register_operand" "v") + (match_operand:VM 2 "register_operand" "v"))))] + "VECTOR_MEM_ALTIVEC_P (<MODE>mode)" + "vnor %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +(define_insn "*altivec_andc<mode>3" + [(set (match_operand:VM 0 "register_operand" "=v") + (and:VM (not:VM (match_operand:VM 2 "register_operand" "v")) + (match_operand:VM 1 "register_operand" "v")))] + "VECTOR_MEM_ALTIVEC_P (<MODE>mode)" + "vandc %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +(define_insn "altivec_vpkuhum" + [(set (match_operand:V16QI 0 "register_operand" "=v") + (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v") + (match_operand:V8HI 2 "register_operand" "v")] + UNSPEC_VPKUHUM))] + "TARGET_ALTIVEC" + "vpkuhum %0,%1,%2" + [(set_attr "type" 
"vecperm")]) + +(define_insn "altivec_vpkuwum" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v") + (match_operand:V4SI 2 "register_operand" "v")] + UNSPEC_VPKUWUM))] + "TARGET_ALTIVEC" + "vpkuwum %0,%1,%2" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vpkpx" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v") + (match_operand:V4SI 2 "register_operand" "v")] + UNSPEC_VPKPX))] + "TARGET_ALTIVEC" + "vpkpx %0,%1,%2" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vpkshss" + [(set (match_operand:V16QI 0 "register_operand" "=v") + (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v") + (match_operand:V8HI 2 "register_operand" "v")] + UNSPEC_VPKSHSS)) + (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))] + "TARGET_ALTIVEC" + "vpkshss %0,%1,%2" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vpkswss" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v") + (match_operand:V4SI 2 "register_operand" "v")] + UNSPEC_VPKSWSS)) + (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))] + "TARGET_ALTIVEC" + "vpkswss %0,%1,%2" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vpkuhus" + [(set (match_operand:V16QI 0 "register_operand" "=v") + (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v") + (match_operand:V8HI 2 "register_operand" "v")] + UNSPEC_VPKUHUS)) + (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))] + "TARGET_ALTIVEC" + "vpkuhus %0,%1,%2" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vpkshus" + [(set (match_operand:V16QI 0 "register_operand" "=v") + (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v") + (match_operand:V8HI 2 "register_operand" "v")] + UNSPEC_VPKSHUS)) + (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))] + "TARGET_ALTIVEC" + "vpkshus %0,%1,%2" + 
[(set_attr "type" "vecperm")]) + +(define_insn "altivec_vpkuwus" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v") + (match_operand:V4SI 2 "register_operand" "v")] + UNSPEC_VPKUWUS)) + (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))] + "TARGET_ALTIVEC" + "vpkuwus %0,%1,%2" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vpkswus" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v") + (match_operand:V4SI 2 "register_operand" "v")] + UNSPEC_VPKSWUS)) + (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))] + "TARGET_ALTIVEC" + "vpkswus %0,%1,%2" + [(set_attr "type" "vecperm")]) + +(define_insn "*altivec_vrl<VI_char>" + [(set (match_operand:VI 0 "register_operand" "=v") + (rotate:VI (match_operand:VI 1 "register_operand" "v") + (match_operand:VI 2 "register_operand" "v")))] + "TARGET_ALTIVEC" + "vrl<VI_char> %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +(define_insn "altivec_vsl" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v") + (match_operand:V4SI 2 "register_operand" "v")] + UNSPEC_VSLV4SI))] + "TARGET_ALTIVEC" + "vsl %0,%1,%2" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vslo" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v") + (match_operand:V4SI 2 "register_operand" "v")] + UNSPEC_VSLO))] + "TARGET_ALTIVEC" + "vslo %0,%1,%2" + [(set_attr "type" "vecperm")]) + +(define_insn "*altivec_vsl<VI_char>" + [(set (match_operand:VI 0 "register_operand" "=v") + (ashift:VI (match_operand:VI 1 "register_operand" "v") + (match_operand:VI 2 "register_operand" "v")))] + "TARGET_ALTIVEC" + "vsl<VI_char> %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +(define_insn "*altivec_vsr<VI_char>" + [(set (match_operand:VI 0 "register_operand" "=v") + (lshiftrt:VI (match_operand:VI 1 "register_operand" "v") + 
(match_operand:VI 2 "register_operand" "v")))] + "TARGET_ALTIVEC" + "vsr<VI_char> %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +(define_insn "*altivec_vsra<VI_char>" + [(set (match_operand:VI 0 "register_operand" "=v") + (ashiftrt:VI (match_operand:VI 1 "register_operand" "v") + (match_operand:VI 2 "register_operand" "v")))] + "TARGET_ALTIVEC" + "vsra<VI_char> %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +(define_insn "altivec_vsr" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v") + (match_operand:V4SI 2 "register_operand" "v")] + UNSPEC_VSR))] + "TARGET_ALTIVEC" + "vsr %0,%1,%2" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vsro" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v") + (match_operand:V4SI 2 "register_operand" "v")] + UNSPEC_VSRO))] + "TARGET_ALTIVEC" + "vsro %0,%1,%2" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vsum4ubs" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v") + (match_operand:V4SI 2 "register_operand" "v")] + UNSPEC_VSUM4UBS)) + (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))] + "TARGET_ALTIVEC" + "vsum4ubs %0,%1,%2" + [(set_attr "type" "veccomplex")]) + +(define_insn "altivec_vsum4s<VI_char>s" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v") + (match_operand:V4SI 2 "register_operand" "v")] + UNSPEC_VSUM4S)) + (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))] + "TARGET_ALTIVEC" + "vsum4s<VI_char>s %0,%1,%2" + [(set_attr "type" "veccomplex")]) + +(define_insn "altivec_vsum2sws" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v") + (match_operand:V4SI 2 "register_operand" "v")] + UNSPEC_VSUM2SWS)) + (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))] + "TARGET_ALTIVEC" + "vsum2sws 
%0,%1,%2" + [(set_attr "type" "veccomplex")]) + +(define_insn "altivec_vsumsws" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v") + (match_operand:V4SI 2 "register_operand" "v")] + UNSPEC_VSUMSWS)) + (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))] + "TARGET_ALTIVEC" + "vsumsws %0,%1,%2" + [(set_attr "type" "veccomplex")]) + +(define_insn "altivec_vspltb" + [(set (match_operand:V16QI 0 "register_operand" "=v") + (vec_duplicate:V16QI + (vec_select:QI (match_operand:V16QI 1 "register_operand" "v") + (parallel + [(match_operand:QI 2 "u5bit_cint_operand" "")]))))] + "TARGET_ALTIVEC" + "vspltb %0,%1,%2" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vsplth" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (vec_duplicate:V8HI + (vec_select:HI (match_operand:V8HI 1 "register_operand" "v") + (parallel + [(match_operand:QI 2 "u5bit_cint_operand" "")]))))] + "TARGET_ALTIVEC" + "vsplth %0,%1,%2" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vspltw" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (vec_duplicate:V4SI + (vec_select:SI (match_operand:V4SI 1 "register_operand" "v") + (parallel + [(match_operand:QI 2 "u5bit_cint_operand" "i")]))))] + "TARGET_ALTIVEC" + "vspltw %0,%1,%2" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vspltsf" + [(set (match_operand:V4SF 0 "register_operand" "=v") + (vec_duplicate:V4SF + (vec_select:SF (match_operand:V4SF 1 "register_operand" "v") + (parallel + [(match_operand:QI 2 "u5bit_cint_operand" "i")]))))] + "VECTOR_UNIT_ALTIVEC_P (V4SFmode)" + "vspltw %0,%1,%2" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vspltis" + [(set (match_operand:VI 0 "register_operand" "=v") + (vec_duplicate:VI + (match_operand:QI 1 "s5bit_cint_operand" "i")))] + "TARGET_ALTIVEC" + "vspltis %0,%1" + [(set_attr "type" "vecperm")]) + +(define_insn "*altivec_vrfiz" + [(set (match_operand:V4SF 0 "register_operand" "=v") + 
(fix:V4SF (match_operand:V4SF 1 "register_operand" "v")))] + "VECTOR_UNIT_ALTIVEC_P (V4SFmode)" + "vrfiz %0,%1" + [(set_attr "type" "vecfloat")]) + +(define_insn "altivec_vperm_" + [(set (match_operand:VM 0 "register_operand" "=v") + (unspec:VM [(match_operand:VM 1 "register_operand" "v") + (match_operand:VM 2 "register_operand" "v") + (match_operand:V16QI 3 "register_operand" "v")] + UNSPEC_VPERM))] + "TARGET_ALTIVEC" + "vperm %0,%1,%2,%3" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vperm__uns" + [(set (match_operand:VM 0 "register_operand" "=v") + (unspec:VM [(match_operand:VM 1 "register_operand" "v") + (match_operand:VM 2 "register_operand" "v") + (match_operand:V16QI 3 "register_operand" "v")] + UNSPEC_VPERM_UNS))] + "TARGET_ALTIVEC" + "vperm %0,%1,%2,%3" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vrfip" ; ceil + [(set (match_operand:V4SF 0 "register_operand" "=v") + (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")] + UNSPEC_FRIP))] + "TARGET_ALTIVEC" + "vrfip %0,%1" + [(set_attr "type" "vecfloat")]) + +(define_insn "altivec_vrfin" + [(set (match_operand:V4SF 0 "register_operand" "=v") + (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")] + UNSPEC_VRFIN))] + "TARGET_ALTIVEC" + "vrfin %0,%1" + [(set_attr "type" "vecfloat")]) + +(define_insn "*altivec_vrfim" ; floor + [(set (match_operand:V4SF 0 "register_operand" "=v") + (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")] + UNSPEC_FRIM))] + "TARGET_ALTIVEC" + "vrfim %0,%1" + [(set_attr "type" "vecfloat")]) + +(define_insn "altivec_vcfux" + [(set (match_operand:V4SF 0 "register_operand" "=v") + (unspec:V4SF [(match_operand:V4SI 1 "register_operand" "v") + (match_operand:QI 2 "immediate_operand" "i")] + UNSPEC_VCFUX))] + "TARGET_ALTIVEC" + "vcfux %0,%1,%2" + [(set_attr "type" "vecfloat")]) + +(define_insn "altivec_vcfsx" + [(set (match_operand:V4SF 0 "register_operand" "=v") + (unspec:V4SF [(match_operand:V4SI 1 "register_operand" "v") + (match_operand:QI 
2 "immediate_operand" "i")] + UNSPEC_VCFSX))] + "TARGET_ALTIVEC" + "vcfsx %0,%1,%2" + [(set_attr "type" "vecfloat")]) + +(define_insn "altivec_vctuxs" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v") + (match_operand:QI 2 "immediate_operand" "i")] + UNSPEC_VCTUXS)) + (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))] + "TARGET_ALTIVEC" + "vctuxs %0,%1,%2" + [(set_attr "type" "vecfloat")]) + +(define_insn "altivec_vctsxs" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v") + (match_operand:QI 2 "immediate_operand" "i")] + UNSPEC_VCTSXS)) + (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))] + "TARGET_ALTIVEC" + "vctsxs %0,%1,%2" + [(set_attr "type" "vecfloat")]) + +(define_insn "altivec_vlogefp" + [(set (match_operand:V4SF 0 "register_operand" "=v") + (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")] + UNSPEC_VLOGEFP))] + "TARGET_ALTIVEC" + "vlogefp %0,%1" + [(set_attr "type" "vecfloat")]) + +(define_insn "altivec_vexptefp" + [(set (match_operand:V4SF 0 "register_operand" "=v") + (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")] + UNSPEC_VEXPTEFP))] + "TARGET_ALTIVEC" + "vexptefp %0,%1" + [(set_attr "type" "vecfloat")]) + +(define_insn "*altivec_vrsqrtefp" + [(set (match_operand:V4SF 0 "register_operand" "=v") + (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")] + UNSPEC_RSQRT))] + "VECTOR_UNIT_ALTIVEC_P (V4SFmode)" + "vrsqrtefp %0,%1" + [(set_attr "type" "vecfloat")]) + +(define_insn "altivec_vrefp" + [(set (match_operand:V4SF 0 "register_operand" "=v") + (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")] + UNSPEC_FRES))] + "VECTOR_UNIT_ALTIVEC_P (V4SFmode)" + "vrefp %0,%1" + [(set_attr "type" "vecfloat")]) + +(define_expand "altivec_copysign_v4sf3" + [(use (match_operand:V4SF 0 "register_operand" "")) + (use (match_operand:V4SF 1 "register_operand" "")) + (use 
(match_operand:V4SF 2 "register_operand" ""))] + "VECTOR_UNIT_ALTIVEC_P (V4SFmode)" + " +{ + rtx mask = gen_reg_rtx (V4SImode); + rtvec v = rtvec_alloc (4); + unsigned HOST_WIDE_INT mask_val = ((unsigned HOST_WIDE_INT)1) << 31; + + RTVEC_ELT (v, 0) = GEN_INT (mask_val); + RTVEC_ELT (v, 1) = GEN_INT (mask_val); + RTVEC_ELT (v, 2) = GEN_INT (mask_val); + RTVEC_ELT (v, 3) = GEN_INT (mask_val); + + emit_insn (gen_vec_initv4si (mask, gen_rtx_PARALLEL (V4SImode, v))); + emit_insn (gen_vector_select_v4sf (operands[0], operands[1], operands[2], + gen_lowpart (V4SFmode, mask))); + DONE; +}") + +(define_insn "altivec_vsldoi_" + [(set (match_operand:VM 0 "register_operand" "=v") + (unspec:VM [(match_operand:VM 1 "register_operand" "v") + (match_operand:VM 2 "register_operand" "v") + (match_operand:QI 3 "immediate_operand" "i")] + UNSPEC_VLSDOI))] + "TARGET_ALTIVEC" + "vsldoi %0,%1,%2,%3" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vupkhsb" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")] + UNSPEC_VUPKHSB))] + "TARGET_ALTIVEC" + "vupkhsb %0,%1" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vupkhpx" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")] + UNSPEC_VUPKHPX))] + "TARGET_ALTIVEC" + "vupkhpx %0,%1" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vupkhsh" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")] + UNSPEC_VUPKHSH))] + "TARGET_ALTIVEC" + "vupkhsh %0,%1" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vupklsb" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")] + UNSPEC_VUPKLSB))] + "TARGET_ALTIVEC" + "vupklsb %0,%1" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vupklpx" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI 
[(match_operand:V8HI 1 "register_operand" "v")] + UNSPEC_VUPKLPX))] + "TARGET_ALTIVEC" + "vupklpx %0,%1" + [(set_attr "type" "vecperm")]) + +(define_insn "altivec_vupklsh" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")] + UNSPEC_VUPKLSH))] + "TARGET_ALTIVEC" + "vupklsh %0,%1" + [(set_attr "type" "vecperm")]) + +;; Compare vectors producing a vector result and a predicate, setting CR6 to +;; indicate a combined status +(define_insn "*altivec_vcmpequ_p" + [(set (reg:CC 74) + (unspec:CC [(eq:CC (match_operand:VI 1 "register_operand" "v") + (match_operand:VI 2 "register_operand" "v"))] + UNSPEC_PREDICATE)) + (set (match_operand:VI 0 "register_operand" "=v") + (eq:VI (match_dup 1) + (match_dup 2)))] + "VECTOR_UNIT_ALTIVEC_P (mode)" + "vcmpequ. %0,%1,%2" + [(set_attr "type" "veccmp")]) + +(define_insn "*altivec_vcmpgts_p" + [(set (reg:CC 74) + (unspec:CC [(gt:CC (match_operand:VI 1 "register_operand" "v") + (match_operand:VI 2 "register_operand" "v"))] + UNSPEC_PREDICATE)) + (set (match_operand:VI 0 "register_operand" "=v") + (gt:VI (match_dup 1) + (match_dup 2)))] + "VECTOR_UNIT_ALTIVEC_P (mode)" + "vcmpgts. %0,%1,%2" + [(set_attr "type" "veccmp")]) + +(define_insn "*altivec_vcmpgtu_p" + [(set (reg:CC 74) + (unspec:CC [(gtu:CC (match_operand:VI 1 "register_operand" "v") + (match_operand:VI 2 "register_operand" "v"))] + UNSPEC_PREDICATE)) + (set (match_operand:VI 0 "register_operand" "=v") + (gtu:VI (match_dup 1) + (match_dup 2)))] + "VECTOR_UNIT_ALTIVEC_P (mode)" + "vcmpgtu. %0,%1,%2" + [(set_attr "type" "veccmp")]) + +(define_insn "*altivec_vcmpeqfp_p" + [(set (reg:CC 74) + (unspec:CC [(eq:CC (match_operand:V4SF 1 "register_operand" "v") + (match_operand:V4SF 2 "register_operand" "v"))] + UNSPEC_PREDICATE)) + (set (match_operand:V4SF 0 "register_operand" "=v") + (eq:V4SF (match_dup 1) + (match_dup 2)))] + "VECTOR_UNIT_ALTIVEC_P (V4SFmode)" + "vcmpeqfp. 
%0,%1,%2" + [(set_attr "type" "veccmp")]) + +(define_insn "*altivec_vcmpgtfp_p" + [(set (reg:CC 74) + (unspec:CC [(gt:CC (match_operand:V4SF 1 "register_operand" "v") + (match_operand:V4SF 2 "register_operand" "v"))] + UNSPEC_PREDICATE)) + (set (match_operand:V4SF 0 "register_operand" "=v") + (gt:V4SF (match_dup 1) + (match_dup 2)))] + "VECTOR_UNIT_ALTIVEC_P (V4SFmode)" + "vcmpgtfp. %0,%1,%2" + [(set_attr "type" "veccmp")]) + +(define_insn "*altivec_vcmpgefp_p" + [(set (reg:CC 74) + (unspec:CC [(ge:CC (match_operand:V4SF 1 "register_operand" "v") + (match_operand:V4SF 2 "register_operand" "v"))] + UNSPEC_PREDICATE)) + (set (match_operand:V4SF 0 "register_operand" "=v") + (ge:V4SF (match_dup 1) + (match_dup 2)))] + "VECTOR_UNIT_ALTIVEC_P (V4SFmode)" + "vcmpgefp. %0,%1,%2" + [(set_attr "type" "veccmp")]) + +(define_insn "altivec_vcmpbfp_p" + [(set (reg:CC 74) + (unspec:CC [(match_operand:V4SF 1 "register_operand" "v") + (match_operand:V4SF 2 "register_operand" "v")] + UNSPEC_VCMPBFP)) + (set (match_operand:V4SF 0 "register_operand" "=v") + (unspec:V4SF [(match_dup 1) + (match_dup 2)] + UNSPEC_VCMPBFP))] + "VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)" + "vcmpbfp. 
%0,%1,%2" + [(set_attr "type" "veccmp")]) + +(define_insn "altivec_mtvscr" + [(set (reg:SI 110) + (unspec_volatile:SI + [(match_operand:V4SI 0 "register_operand" "v")] UNSPECV_MTVSCR))] + "TARGET_ALTIVEC" + "mtvscr %0" + [(set_attr "type" "vecsimple")]) + +(define_insn "altivec_mfvscr" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (unspec_volatile:V8HI [(reg:SI 110)] UNSPECV_MFVSCR))] + "TARGET_ALTIVEC" + "mfvscr %0" + [(set_attr "type" "vecsimple")]) + +(define_insn "altivec_dssall" + [(unspec_volatile [(const_int 0)] UNSPECV_DSSALL)] + "TARGET_ALTIVEC" + "dssall" + [(set_attr "type" "vecsimple")]) + +(define_insn "altivec_dss" + [(unspec_volatile [(match_operand:QI 0 "immediate_operand" "i")] + UNSPECV_DSS)] + "TARGET_ALTIVEC" + "dss %0" + [(set_attr "type" "vecsimple")]) + +(define_insn "altivec_dst" + [(unspec [(match_operand 0 "register_operand" "b") + (match_operand:SI 1 "register_operand" "r") + (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DST)] + "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode" + "dst %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +(define_insn "altivec_dstt" + [(unspec [(match_operand 0 "register_operand" "b") + (match_operand:SI 1 "register_operand" "r") + (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTT)] + "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode" + "dstt %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +(define_insn "altivec_dstst" + [(unspec [(match_operand 0 "register_operand" "b") + (match_operand:SI 1 "register_operand" "r") + (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTST)] + "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode" + "dstst %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +(define_insn "altivec_dststt" + [(unspec [(match_operand 0 "register_operand" "b") + (match_operand:SI 1 "register_operand" "r") + (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTSTT)] + "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode" + "dststt %0,%1,%2" + [(set_attr "type" "vecsimple")]) + 
+(define_insn "altivec_lvsl" + [(set (match_operand:V16QI 0 "register_operand" "=v") + (unspec:V16QI [(match_operand 1 "memory_operand" "Z")] UNSPEC_LVSL))] + "TARGET_ALTIVEC" + "lvsl %0,%y1" + [(set_attr "type" "vecload")]) + +(define_insn "altivec_lvsr" + [(set (match_operand:V16QI 0 "register_operand" "=v") + (unspec:V16QI [(match_operand 1 "memory_operand" "Z")] UNSPEC_LVSR))] + "TARGET_ALTIVEC" + "lvsr %0,%y1" + [(set_attr "type" "vecload")]) + +(define_expand "build_vector_mask_for_load" + [(set (match_operand:V16QI 0 "register_operand" "") + (unspec:V16QI [(match_operand 1 "memory_operand" "")] UNSPEC_LVSR))] + "TARGET_ALTIVEC" + " +{ + rtx addr; + rtx temp; + + gcc_assert (GET_CODE (operands[1]) == MEM); + + addr = XEXP (operands[1], 0); + temp = gen_reg_rtx (GET_MODE (addr)); + emit_insn (gen_rtx_SET (VOIDmode, temp, + gen_rtx_NEG (GET_MODE (addr), addr))); + emit_insn (gen_altivec_lvsr (operands[0], + replace_equiv_address (operands[1], temp))); + DONE; +}") + +;; Parallel some of the LVE* and STV*'s with unspecs because some have +;; identical rtl but different instructions-- and gcc gets confused. 
+ +(define_insn "altivec_lve<VI_char>x" + [(parallel + [(set (match_operand:VI 0 "register_operand" "=v") + (match_operand:VI 1 "memory_operand" "Z")) + (unspec [(const_int 0)] UNSPEC_LVE)])] + "TARGET_ALTIVEC" + "lve<VI_char>x %0,%y1" + [(set_attr "type" "vecload")]) + +(define_insn "*altivec_lvesfx" + [(parallel + [(set (match_operand:V4SF 0 "register_operand" "=v") + (match_operand:V4SF 1 "memory_operand" "Z")) + (unspec [(const_int 0)] UNSPEC_LVE)])] + "TARGET_ALTIVEC" + "lvewx %0,%y1" + [(set_attr "type" "vecload")]) + +(define_insn "altivec_lvxl" + [(parallel + [(set (match_operand:V4SI 0 "register_operand" "=v") + (match_operand:V4SI 1 "memory_operand" "Z")) + (unspec [(const_int 0)] UNSPEC_SET_VSCR)])] + "TARGET_ALTIVEC" + "lvxl %0,%y1" + [(set_attr "type" "vecload")]) + +(define_insn "altivec_lvx_<mode>" + [(parallel + [(set (match_operand:VM2 0 "register_operand" "=v") + (match_operand:VM2 1 "memory_operand" "Z")) + (unspec [(const_int 0)] UNSPEC_LVX)])] + "TARGET_ALTIVEC" + "lvx %0,%y1" + [(set_attr "type" "vecload")]) + +(define_insn "altivec_stvx_<mode>" + [(parallel + [(set (match_operand:VM2 0 "memory_operand" "=Z") + (match_operand:VM2 1 "register_operand" "v")) + (unspec [(const_int 0)] UNSPEC_STVX)])] + "TARGET_ALTIVEC" + "stvx %1,%y0" + [(set_attr "type" "vecstore")]) + +(define_insn "altivec_stvxl" + [(parallel + [(set (match_operand:V4SI 0 "memory_operand" "=Z") + (match_operand:V4SI 1 "register_operand" "v")) + (unspec [(const_int 0)] UNSPEC_STVXL)])] + "TARGET_ALTIVEC" + "stvxl %1,%y0" + [(set_attr "type" "vecstore")]) + +(define_insn "altivec_stve<VI_char>x" + [(set (match_operand:<VI_scalar> 0 "memory_operand" "=Z") + (unspec:<VI_scalar> [(match_operand:VI 1 "register_operand" "v")] UNSPEC_STVE))] + "TARGET_ALTIVEC" + "stve<VI_char>x %1,%y0" + [(set_attr "type" "vecstore")]) + +(define_insn "*altivec_stvesfx" + [(set (match_operand:SF 0 "memory_operand" "=Z") + (unspec:SF [(match_operand:V4SF 1 "register_operand" "v")] UNSPEC_STVE))] + "TARGET_ALTIVEC" + "stvewx %1,%y0" + [(set_attr "type" "vecstore")]) + +;; 
Generate +;; vspltis? SCRATCH0,0 +;; vsubu?m SCRATCH2,SCRATCH1,%1 +;; vmaxs? %0,%1,SCRATCH2" +(define_expand "abs2" + [(set (match_dup 2) (vec_duplicate:VI (const_int 0))) + (set (match_dup 3) + (minus:VI (match_dup 2) + (match_operand:VI 1 "register_operand" "v"))) + (set (match_operand:VI 0 "register_operand" "=v") + (smax:VI (match_dup 1) (match_dup 3)))] + "TARGET_ALTIVEC" +{ + operands[2] = gen_reg_rtx (GET_MODE (operands[0])); + operands[3] = gen_reg_rtx (GET_MODE (operands[0])); +}) + +;; Generate +;; vspltisw SCRATCH1,-1 +;; vslw SCRATCH2,SCRATCH1,SCRATCH1 +;; vandc %0,%1,SCRATCH2 +(define_expand "altivec_absv4sf2" + [(set (match_dup 2) + (vec_duplicate:V4SI (const_int -1))) + (set (match_dup 3) + (ashift:V4SI (match_dup 2) (match_dup 2))) + (set (match_operand:V4SF 0 "register_operand" "=v") + (and:V4SF (not:V4SF (subreg:V4SF (match_dup 3) 0)) + (match_operand:V4SF 1 "register_operand" "v")))] + "TARGET_ALTIVEC" +{ + operands[2] = gen_reg_rtx (V4SImode); + operands[3] = gen_reg_rtx (V4SImode); +}) + +;; Generate +;; vspltis? SCRATCH0,0 +;; vsubs?s SCRATCH2,SCRATCH1,%1 +;; vmaxs? 
%0,%1,SCRATCH2" +(define_expand "altivec_abss_" + [(set (match_dup 2) (vec_duplicate:VI (const_int 0))) + (parallel [(set (match_dup 3) + (unspec:VI [(match_dup 2) + (match_operand:VI 1 "register_operand" "v")] + UNSPEC_VSUBS)) + (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]) + (set (match_operand:VI 0 "register_operand" "=v") + (smax:VI (match_dup 1) (match_dup 3)))] + "TARGET_ALTIVEC" +{ + operands[2] = gen_reg_rtx (GET_MODE (operands[0])); + operands[3] = gen_reg_rtx (GET_MODE (operands[0])); +}) + +(define_insn "altivec_vsumsws_nomode" + [(set (match_operand 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v") + (match_operand:V4SI 2 "register_operand" "v")] + UNSPEC_VSUMSWS)) + (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))] + "TARGET_ALTIVEC" + "vsumsws %0,%1,%2" + [(set_attr "type" "veccomplex")]) + +(define_expand "reduc_splus_" + [(set (match_operand:VIshort 0 "register_operand" "=v") + (unspec:VIshort [(match_operand:VIshort 1 "register_operand" "v")] + UNSPEC_REDUC_PLUS))] + "TARGET_ALTIVEC" + " +{ + rtx vzero = gen_reg_rtx (V4SImode); + rtx vtmp1 = gen_reg_rtx (V4SImode); + + emit_insn (gen_altivec_vspltisw (vzero, const0_rtx)); + emit_insn (gen_altivec_vsum4ss (vtmp1, operands[1], vzero)); + emit_insn (gen_altivec_vsumsws_nomode (operands[0], vtmp1, vzero)); + DONE; +}") + +(define_expand "reduc_uplus_v16qi" + [(set (match_operand:V16QI 0 "register_operand" "=v") + (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")] + UNSPEC_REDUC_PLUS))] + "TARGET_ALTIVEC" + " +{ + rtx vzero = gen_reg_rtx (V4SImode); + rtx vtmp1 = gen_reg_rtx (V4SImode); + + emit_insn (gen_altivec_vspltisw (vzero, const0_rtx)); + emit_insn (gen_altivec_vsum4ubs (vtmp1, operands[1], vzero)); + emit_insn (gen_altivec_vsumsws_nomode (operands[0], vtmp1, vzero)); + DONE; +}") + +(define_expand "neg2" + [(use (match_operand:VI 0 "register_operand" "")) + (use (match_operand:VI 1 "register_operand" ""))] + 
"TARGET_ALTIVEC" + " +{ + rtx vzero; + + vzero = gen_reg_rtx (GET_MODE (operands[0])); + emit_insn (gen_altivec_vspltis (vzero, const0_rtx)); + emit_insn (gen_sub3 (operands[0], vzero, operands[1])); + + DONE; +}") + +(define_expand "udot_prod" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (plus:V4SI (match_operand:V4SI 3 "register_operand" "v") + (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v") + (match_operand:VIshort 2 "register_operand" "v")] + UNSPEC_VMSUMU)))] + "TARGET_ALTIVEC" + " +{ + emit_insn (gen_altivec_vmsumum (operands[0], operands[1], operands[2], operands[3])); + DONE; +}") + +(define_expand "sdot_prodv8hi" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (plus:V4SI (match_operand:V4SI 3 "register_operand" "v") + (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v") + (match_operand:V8HI 2 "register_operand" "v")] + UNSPEC_VMSUMSHM)))] + "TARGET_ALTIVEC" + " +{ + emit_insn (gen_altivec_vmsumshm (operands[0], operands[1], operands[2], operands[3])); + DONE; +}") + +(define_expand "widen_usum3" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (plus:V4SI (match_operand:V4SI 2 "register_operand" "v") + (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")] + UNSPEC_VMSUMU)))] + "TARGET_ALTIVEC" + " +{ + rtx vones = gen_reg_rtx (GET_MODE (operands[1])); + + emit_insn (gen_altivec_vspltis (vones, const1_rtx)); + emit_insn (gen_altivec_vmsumum (operands[0], operands[1], vones, operands[2])); + DONE; +}") + +(define_expand "widen_ssumv16qi3" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (plus:V4SI (match_operand:V4SI 2 "register_operand" "v") + (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")] + UNSPEC_VMSUMM)))] + "TARGET_ALTIVEC" + " +{ + rtx vones = gen_reg_rtx (V16QImode); + + emit_insn (gen_altivec_vspltisb (vones, const1_rtx)); + emit_insn (gen_altivec_vmsummbm (operands[0], operands[1], vones, operands[2])); + DONE; +}") + +(define_expand "widen_ssumv8hi3" + [(set 
(match_operand:V4SI 0 "register_operand" "=v") + (plus:V4SI (match_operand:V4SI 2 "register_operand" "v") + (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")] + UNSPEC_VMSUMSHM)))] + "TARGET_ALTIVEC" + " +{ + rtx vones = gen_reg_rtx (V8HImode); + + emit_insn (gen_altivec_vspltish (vones, const1_rtx)); + emit_insn (gen_altivec_vmsumshm (operands[0], operands[1], vones, operands[2])); + DONE; +}") + +(define_expand "vec_unpacks_hi_v16qi" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")] + UNSPEC_VUPKHSB))] + "TARGET_ALTIVEC" + " +{ + emit_insn (gen_altivec_vupkhsb (operands[0], operands[1])); + DONE; +}") + +(define_expand "vec_unpacks_hi_v8hi" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")] + UNSPEC_VUPKHSH))] + "TARGET_ALTIVEC" + " +{ + emit_insn (gen_altivec_vupkhsh (operands[0], operands[1])); + DONE; +}") + +(define_expand "vec_unpacks_lo_v16qi" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")] + UNSPEC_VUPKLSB))] + "TARGET_ALTIVEC" + " +{ + emit_insn (gen_altivec_vupklsb (operands[0], operands[1])); + DONE; +}") + +(define_expand "vec_unpacks_lo_v8hi" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")] + UNSPEC_VUPKLSH))] + "TARGET_ALTIVEC" + " +{ + emit_insn (gen_altivec_vupklsh (operands[0], operands[1])); + DONE; +}") + +(define_insn "vperm_v8hiv4si" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v") + (match_operand:V4SI 2 "register_operand" "v") + (match_operand:V16QI 3 "register_operand" "v")] + UNSPEC_VPERMSI))] + "TARGET_ALTIVEC" + "vperm %0,%1,%2,%3" + [(set_attr "type" "vecperm")]) + +(define_insn "vperm_v16qiv8hi" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (unspec:V8HI [(match_operand:V16QI 1 
"register_operand" "v") + (match_operand:V8HI 2 "register_operand" "v") + (match_operand:V16QI 3 "register_operand" "v")] + UNSPEC_VPERMHI))] + "TARGET_ALTIVEC" + "vperm %0,%1,%2,%3" + [(set_attr "type" "vecperm")]) + + +(define_expand "vec_unpacku_hi_v16qi" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")] + UNSPEC_VUPKHUB))] + "TARGET_ALTIVEC" + " +{ + rtx vzero = gen_reg_rtx (V8HImode); + rtx mask = gen_reg_rtx (V16QImode); + rtvec v = rtvec_alloc (16); + + emit_insn (gen_altivec_vspltish (vzero, const0_rtx)); + + RTVEC_ELT (v, 0) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, 0); + RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, 1); + RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, 2); + RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, 3); + RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, 4); + RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, 5); + RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, 6); + RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, 7); + + emit_insn (gen_vec_initv16qi (mask, gen_rtx_PARALLEL (V16QImode, v))); + emit_insn (gen_vperm_v16qiv8hi (operands[0], operands[1], vzero, mask)); + DONE; +}") + +(define_expand "vec_unpacku_hi_v8hi" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")] + UNSPEC_VUPKHUH))] + "TARGET_ALTIVEC" + " +{ + rtx vzero = gen_reg_rtx (V4SImode); + rtx mask = gen_reg_rtx (V16QImode); + rtvec v = rtvec_alloc (16); + + emit_insn (gen_altivec_vspltisw (vzero, const0_rtx)); + + RTVEC_ELT (v, 0) = 
gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, 17); + RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, 0); + RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, 1); + RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, 17); + RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, 2); + RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, 3); + RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, 17); + RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, 4); + RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, 5); + RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, 17); + RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, 6); + RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, 7); + + emit_insn (gen_vec_initv16qi (mask, gen_rtx_PARALLEL (V16QImode, v))); + emit_insn (gen_vperm_v8hiv4si (operands[0], operands[1], vzero, mask)); + DONE; +}") + +(define_expand "vec_unpacku_lo_v16qi" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")] + UNSPEC_VUPKLUB))] + "TARGET_ALTIVEC" + " +{ + rtx vzero = gen_reg_rtx (V8HImode); + rtx mask = gen_reg_rtx (V16QImode); + rtvec v = rtvec_alloc (16); + + emit_insn (gen_altivec_vspltish (vzero, const0_rtx)); + + RTVEC_ELT (v, 0) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, 8); + RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, 9); + RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, 10); + RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, 11); + RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, 12); + RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, 13); + 
RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, 14); + RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, 15); + + emit_insn (gen_vec_initv16qi (mask, gen_rtx_PARALLEL (V16QImode, v))); + emit_insn (gen_vperm_v16qiv8hi (operands[0], operands[1], vzero, mask)); + DONE; +}") + +(define_expand "vec_unpacku_lo_v8hi" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")] + UNSPEC_VUPKLUH))] + "TARGET_ALTIVEC" + " +{ + rtx vzero = gen_reg_rtx (V4SImode); + rtx mask = gen_reg_rtx (V16QImode); + rtvec v = rtvec_alloc (16); + + emit_insn (gen_altivec_vspltisw (vzero, const0_rtx)); + + RTVEC_ELT (v, 0) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, 17); + RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, 8); + RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, 9); + RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, 17); + RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, 10); + RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, 11); + RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, 17); + RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, 12); + RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, 13); + RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, 17); + RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, 14); + RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, 15); + + emit_insn (gen_vec_initv16qi (mask, gen_rtx_PARALLEL (V16QImode, v))); + emit_insn (gen_vperm_v8hiv4si (operands[0], operands[1], vzero, mask)); + DONE; +}") + +(define_expand "vec_widen_umult_hi_v16qi" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v") + (match_operand:V16QI 2 "register_operand" "v")] + UNSPEC_VMULWHUB))] + 
"TARGET_ALTIVEC" + " +{ + rtx ve = gen_reg_rtx (V8HImode); + rtx vo = gen_reg_rtx (V8HImode); + + emit_insn (gen_altivec_vmuleub (ve, operands[1], operands[2])); + emit_insn (gen_altivec_vmuloub (vo, operands[1], operands[2])); + emit_insn (gen_altivec_vmrghh (operands[0], ve, vo)); + DONE; +}") + +(define_expand "vec_widen_umult_lo_v16qi" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v") + (match_operand:V16QI 2 "register_operand" "v")] + UNSPEC_VMULWLUB))] + "TARGET_ALTIVEC" + " +{ + rtx ve = gen_reg_rtx (V8HImode); + rtx vo = gen_reg_rtx (V8HImode); + + emit_insn (gen_altivec_vmuleub (ve, operands[1], operands[2])); + emit_insn (gen_altivec_vmuloub (vo, operands[1], operands[2])); + emit_insn (gen_altivec_vmrglh (operands[0], ve, vo)); + DONE; +}") + +(define_expand "vec_widen_smult_hi_v16qi" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v") + (match_operand:V16QI 2 "register_operand" "v")] + UNSPEC_VMULWHSB))] + "TARGET_ALTIVEC" + " +{ + rtx ve = gen_reg_rtx (V8HImode); + rtx vo = gen_reg_rtx (V8HImode); + + emit_insn (gen_altivec_vmulesb (ve, operands[1], operands[2])); + emit_insn (gen_altivec_vmulosb (vo, operands[1], operands[2])); + emit_insn (gen_altivec_vmrghh (operands[0], ve, vo)); + DONE; +}") + +(define_expand "vec_widen_smult_lo_v16qi" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v") + (match_operand:V16QI 2 "register_operand" "v")] + UNSPEC_VMULWLSB))] + "TARGET_ALTIVEC" + " +{ + rtx ve = gen_reg_rtx (V8HImode); + rtx vo = gen_reg_rtx (V8HImode); + + emit_insn (gen_altivec_vmulesb (ve, operands[1], operands[2])); + emit_insn (gen_altivec_vmulosb (vo, operands[1], operands[2])); + emit_insn (gen_altivec_vmrglh (operands[0], ve, vo)); + DONE; +}") + +(define_expand "vec_widen_umult_hi_v8hi" + [(set (match_operand:V4SI 0 "register_operand" "=v") + 
(unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v") + (match_operand:V8HI 2 "register_operand" "v")] + UNSPEC_VMULWHUH))] + "TARGET_ALTIVEC" + " +{ + rtx ve = gen_reg_rtx (V4SImode); + rtx vo = gen_reg_rtx (V4SImode); + + emit_insn (gen_altivec_vmuleuh (ve, operands[1], operands[2])); + emit_insn (gen_altivec_vmulouh (vo, operands[1], operands[2])); + emit_insn (gen_altivec_vmrghw (operands[0], ve, vo)); + DONE; +}") + +(define_expand "vec_widen_umult_lo_v8hi" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v") + (match_operand:V8HI 2 "register_operand" "v")] + UNSPEC_VMULWLUH))] + "TARGET_ALTIVEC" + " +{ + rtx ve = gen_reg_rtx (V4SImode); + rtx vo = gen_reg_rtx (V4SImode); + + emit_insn (gen_altivec_vmuleuh (ve, operands[1], operands[2])); + emit_insn (gen_altivec_vmulouh (vo, operands[1], operands[2])); + emit_insn (gen_altivec_vmrglw (operands[0], ve, vo)); + DONE; +}") + +(define_expand "vec_widen_smult_hi_v8hi" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v") + (match_operand:V8HI 2 "register_operand" "v")] + UNSPEC_VMULWHSH))] + "TARGET_ALTIVEC" + " +{ + rtx ve = gen_reg_rtx (V4SImode); + rtx vo = gen_reg_rtx (V4SImode); + + emit_insn (gen_altivec_vmulesh (ve, operands[1], operands[2])); + emit_insn (gen_altivec_vmulosh (vo, operands[1], operands[2])); + emit_insn (gen_altivec_vmrghw (operands[0], ve, vo)); + DONE; +}") + +(define_expand "vec_widen_smult_lo_v8hi" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v") + (match_operand:V8HI 2 "register_operand" "v")] + UNSPEC_VMULWLSH))] + "TARGET_ALTIVEC" + " +{ + rtx ve = gen_reg_rtx (V4SImode); + rtx vo = gen_reg_rtx (V4SImode); + + emit_insn (gen_altivec_vmulesh (ve, operands[1], operands[2])); + emit_insn (gen_altivec_vmulosh (vo, operands[1], operands[2])); + emit_insn (gen_altivec_vmrglw (operands[0], ve, 
vo)); + DONE; +}") + +(define_expand "vec_pack_trunc_v8hi" + [(set (match_operand:V16QI 0 "register_operand" "=v") + (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v") + (match_operand:V8HI 2 "register_operand" "v")] + UNSPEC_VPKUHUM))] + "TARGET_ALTIVEC" + " +{ + emit_insn (gen_altivec_vpkuhum (operands[0], operands[1], operands[2])); + DONE; +}") + +(define_expand "vec_pack_trunc_v4si" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v") + (match_operand:V4SI 2 "register_operand" "v")] + UNSPEC_VPKUWUM))] + "TARGET_ALTIVEC" + " +{ + emit_insn (gen_altivec_vpkuwum (operands[0], operands[1], operands[2])); + DONE; +}") + +(define_expand "altivec_negv4sf2" + [(use (match_operand:V4SF 0 "register_operand" "")) + (use (match_operand:V4SF 1 "register_operand" ""))] + "TARGET_ALTIVEC" + " +{ + rtx neg0; + + /* Generate [-0.0, -0.0, -0.0, -0.0]. */ + neg0 = gen_reg_rtx (V4SImode); + emit_insn (gen_altivec_vspltisw (neg0, constm1_rtx)); + emit_insn (gen_vashlv4si3 (neg0, neg0, neg0)); + + /* XOR */ + emit_insn (gen_xorv4sf3 (operands[0], + gen_lowpart (V4SFmode, neg0), operands[1])); + + DONE; +}") + +;; Vector SIMD PEM v2.06c defines LVLX, LVLXL, LVRX, LVRXL, +;; STVLX, STVLXL, STVVRX, STVRXL are available only on Cell. 
+(define_insn "altivec_lvlx" + [(set (match_operand:V16QI 0 "register_operand" "=v") + (unspec:V16QI [(match_operand 1 "memory_operand" "Z")] + UNSPEC_LVLX))] + "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL" + "lvlx %0,%y1" + [(set_attr "type" "vecload")]) + +(define_insn "altivec_lvlxl" + [(set (match_operand:V16QI 0 "register_operand" "=v") + (unspec:V16QI [(match_operand 1 "memory_operand" "Z")] + UNSPEC_LVLXL))] + "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL" + "lvlxl %0,%y1" + [(set_attr "type" "vecload")]) + +(define_insn "altivec_lvrx" + [(set (match_operand:V16QI 0 "register_operand" "=v") + (unspec:V16QI [(match_operand 1 "memory_operand" "Z")] + UNSPEC_LVRX))] + "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL" + "lvrx %0,%y1" + [(set_attr "type" "vecload")]) + +(define_insn "altivec_lvrxl" + [(set (match_operand:V16QI 0 "register_operand" "=v") + (unspec:V16QI [(match_operand 1 "memory_operand" "Z")] + UNSPEC_LVRXL))] + "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL" + "lvrxl %0,%y1" + [(set_attr "type" "vecload")]) + +(define_insn "altivec_stvlx" + [(parallel + [(set (match_operand:V16QI 0 "memory_operand" "=Z") + (match_operand:V16QI 1 "register_operand" "v")) + (unspec [(const_int 0)] UNSPEC_STVLX)])] + "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL" + "stvlx %1,%y0" + [(set_attr "type" "vecstore")]) + +(define_insn "altivec_stvlxl" + [(parallel + [(set (match_operand:V16QI 0 "memory_operand" "=Z") + (match_operand:V16QI 1 "register_operand" "v")) + (unspec [(const_int 0)] UNSPEC_STVLXL)])] + "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL" + "stvlxl %1,%y0" + [(set_attr "type" "vecstore")]) + +(define_insn "altivec_stvrx" + [(parallel + [(set (match_operand:V16QI 0 "memory_operand" "=Z") + (match_operand:V16QI 1 "register_operand" "v")) + (unspec [(const_int 0)] UNSPEC_STVRX)])] + "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL" + "stvrx %1,%y0" + [(set_attr "type" "vecstore")]) + +(define_insn "altivec_stvrxl" + [(parallel + [(set 
(match_operand:V16QI 0 "memory_operand" "=Z") + (match_operand:V16QI 1 "register_operand" "v")) + (unspec [(const_int 0)] UNSPEC_STVRXL)])] + "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL" + "stvrxl %1,%y0" + [(set_attr "type" "vecstore")]) + +(define_expand "vec_extract_evenv4si" + [(set (match_operand:V4SI 0 "register_operand" "") + (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "") + (match_operand:V4SI 2 "register_operand" "")] + UNSPEC_EXTEVEN_V4SI))] + "TARGET_ALTIVEC" + " +{ + rtx mask = gen_reg_rtx (V16QImode); + rtvec v = rtvec_alloc (16); + + RTVEC_ELT (v, 0) = gen_rtx_CONST_INT (QImode, 0); + RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, 1); + RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, 2); + RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, 3); + RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, 8); + RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, 9); + RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, 10); + RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, 11); + RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, 17); + RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, 18); + RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, 19); + RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, 24); + RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, 25); + RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, 26); + RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, 27); + emit_insn (gen_vec_initv16qi (mask, gen_rtx_PARALLEL (V16QImode, v))); + emit_insn (gen_altivec_vperm_v4si (operands[0], operands[1], operands[2], mask)); + + DONE; +}") + +(define_expand "vec_extract_evenv4sf" + [(set (match_operand:V4SF 0 "register_operand" "") + (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "") + (match_operand:V4SF 2 "register_operand" "")] + UNSPEC_EXTEVEN_V4SF))] + "TARGET_ALTIVEC" + " +{ + rtx mask = gen_reg_rtx (V16QImode); + rtvec v = rtvec_alloc (16); + + RTVEC_ELT (v, 0) = gen_rtx_CONST_INT (QImode, 0); + RTVEC_ELT (v, 1) = 
gen_rtx_CONST_INT (QImode, 1); + RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, 2); + RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, 3); + RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, 8); + RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, 9); + RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, 10); + RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, 11); + RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, 17); + RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, 18); + RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, 19); + RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, 24); + RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, 25); + RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, 26); + RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, 27); + emit_insn (gen_vec_initv16qi (mask, gen_rtx_PARALLEL (V16QImode, v))); + emit_insn (gen_altivec_vperm_v4sf (operands[0], operands[1], operands[2], mask)); + + DONE; +}") + +(define_expand "vec_extract_evenv8hi" + [(set (match_operand:V8HI 0 "register_operand" "") + (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "") + (match_operand:V8HI 2 "register_operand" "")] + UNSPEC_EXTEVEN_V8HI))] + "TARGET_ALTIVEC" + " +{ + rtx mask = gen_reg_rtx (V16QImode); + rtvec v = rtvec_alloc (16); + + RTVEC_ELT (v, 0) = gen_rtx_CONST_INT (QImode, 0); + RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, 1); + RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, 4); + RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, 5); + RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, 8); + RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, 9); + RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, 12); + RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, 13); + RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, 17); + RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, 20); + RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, 21); + RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, 24); + RTVEC_ELT (v, 13) = gen_rtx_CONST_INT 
(QImode, 25); + RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, 28); + RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, 29); + emit_insn (gen_vec_initv16qi (mask, gen_rtx_PARALLEL (V16QImode, v))); + emit_insn (gen_altivec_vperm_v8hi (operands[0], operands[1], operands[2], mask)); + + DONE; +}") + +(define_expand "vec_extract_evenv16qi" + [(set (match_operand:V16QI 0 "register_operand" "") + (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "") + (match_operand:V16QI 2 "register_operand" "")] + UNSPEC_EXTEVEN_V16QI))] + "TARGET_ALTIVEC" + " +{ + rtx mask = gen_reg_rtx (V16QImode); + rtvec v = rtvec_alloc (16); + + RTVEC_ELT (v, 0) = gen_rtx_CONST_INT (QImode, 0); + RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, 2); + RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, 4); + RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, 6); + RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, 8); + RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, 10); + RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, 12); + RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, 14); + RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, 16); + RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, 18); + RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, 20); + RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, 22); + RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, 24); + RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, 26); + RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, 28); + RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, 30); + emit_insn (gen_vec_initv16qi (mask, gen_rtx_PARALLEL (V16QImode, v))); + emit_insn (gen_altivec_vperm_v16qi (operands[0], operands[1], operands[2], mask)); + + DONE; +}") + +(define_expand "vec_extract_oddv4si" + [(set (match_operand:V4SI 0 "register_operand" "") + (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "") + (match_operand:V4SI 2 "register_operand" "")] + UNSPEC_EXTODD_V4SI))] + "TARGET_ALTIVEC" + " +{ + rtx mask = gen_reg_rtx (V16QImode); + rtvec v = rtvec_alloc (16); + + RTVEC_ELT (v, 0) = 
gen_rtx_CONST_INT (QImode, 4); + RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, 5); + RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, 6); + RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, 7); + RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, 12); + RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, 13); + RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, 14); + RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, 15); + RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, 20); + RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, 21); + RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, 22); + RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, 23); + RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, 28); + RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, 29); + RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, 30); + RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, 31); + emit_insn (gen_vec_initv16qi (mask, gen_rtx_PARALLEL (V16QImode, v))); + emit_insn (gen_altivec_vperm_v4si (operands[0], operands[1], operands[2], mask)); + + DONE; +}") + +(define_expand "vec_extract_oddv4sf" + [(set (match_operand:V4SF 0 "register_operand" "") + (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "") + (match_operand:V4SF 2 "register_operand" "")] + UNSPEC_EXTODD_V4SF))] + "TARGET_ALTIVEC" + " +{ + rtx mask = gen_reg_rtx (V16QImode); + rtvec v = rtvec_alloc (16); + + RTVEC_ELT (v, 0) = gen_rtx_CONST_INT (QImode, 4); + RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, 5); + RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, 6); + RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, 7); + RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, 12); + RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, 13); + RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, 14); + RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, 15); + RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, 20); + RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, 21); + RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, 22); + RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, 23); + RTVEC_ELT (v, 12) = gen_rtx_CONST_INT 
(QImode, 28); + RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, 29); + RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, 30); + RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, 31); + emit_insn (gen_vec_initv16qi (mask, gen_rtx_PARALLEL (V16QImode, v))); + emit_insn (gen_altivec_vperm_v4sf (operands[0], operands[1], operands[2], mask)); + + DONE; +}") + +(define_insn "vpkuhum_nomode" + [(set (match_operand:V16QI 0 "register_operand" "=v") + (unspec:V16QI [(match_operand 1 "register_operand" "v") + (match_operand 2 "register_operand" "v")] + UNSPEC_VPKUHUM))] + "TARGET_ALTIVEC" + "vpkuhum %0,%1,%2" + [(set_attr "type" "vecperm")]) + +(define_insn "vpkuwum_nomode" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (unspec:V8HI [(match_operand 1 "register_operand" "v") + (match_operand 2 "register_operand" "v")] + UNSPEC_VPKUWUM))] + "TARGET_ALTIVEC" + "vpkuwum %0,%1,%2" + [(set_attr "type" "vecperm")]) + +(define_expand "vec_extract_oddv8hi" + [(set (match_operand:V8HI 0 "register_operand" "") + (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "") + (match_operand:V8HI 2 "register_operand" "")] + UNSPEC_EXTODD_V8HI))] + "TARGET_ALTIVEC" + " +{ + emit_insn (gen_vpkuwum_nomode (operands[0], operands[1], operands[2])); + DONE; +}") + +(define_expand "vec_extract_oddv16qi" + [(set (match_operand:V16QI 0 "register_operand" "") + (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "") + (match_operand:V16QI 2 "register_operand" "")] + UNSPEC_EXTODD_V16QI))] + "TARGET_ALTIVEC" + " +{ + emit_insn (gen_vpkuhum_nomode (operands[0], operands[1], operands[2])); + DONE; +}") + +(define_expand "vec_interleave_high" + [(set (match_operand:VI 0 "register_operand" "") + (unspec:VI [(match_operand:VI 1 "register_operand" "") + (match_operand:VI 2 "register_operand" "")] + UNSPEC_INTERHI))] + "TARGET_ALTIVEC" + " +{ + emit_insn (gen_altivec_vmrgh (operands[0], operands[1], operands[2])); + DONE; +}") + +(define_expand "vec_interleave_low" + [(set (match_operand:VI 0 
"register_operand" "") + (unspec:VI [(match_operand:VI 1 "register_operand" "") + (match_operand:VI 2 "register_operand" "")] + UNSPEC_INTERLO))] + "TARGET_ALTIVEC" + " +{ + emit_insn (gen_altivec_vmrgl (operands[0], operands[1], operands[2])); + DONE; +}") + +(define_expand "vec_unpacks_float_hi_v8hi" + [(set (match_operand:V4SF 0 "register_operand" "") + (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "")] + UNSPEC_VUPKHS_V4SF))] + "TARGET_ALTIVEC" + " +{ + rtx tmp = gen_reg_rtx (V4SImode); + + emit_insn (gen_vec_unpacks_hi_v8hi (tmp, operands[1])); + emit_insn (gen_altivec_vcfsx (operands[0], tmp, const0_rtx)); + DONE; +}") + +(define_expand "vec_unpacks_float_lo_v8hi" + [(set (match_operand:V4SF 0 "register_operand" "") + (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "")] + UNSPEC_VUPKLS_V4SF))] + "TARGET_ALTIVEC" + " +{ + rtx tmp = gen_reg_rtx (V4SImode); + + emit_insn (gen_vec_unpacks_lo_v8hi (tmp, operands[1])); + emit_insn (gen_altivec_vcfsx (operands[0], tmp, const0_rtx)); + DONE; +}") + +(define_expand "vec_unpacku_float_hi_v8hi" + [(set (match_operand:V4SF 0 "register_operand" "") + (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "")] + UNSPEC_VUPKHU_V4SF))] + "TARGET_ALTIVEC" + " +{ + rtx tmp = gen_reg_rtx (V4SImode); + + emit_insn (gen_vec_unpacku_hi_v8hi (tmp, operands[1])); + emit_insn (gen_altivec_vcfux (operands[0], tmp, const0_rtx)); + DONE; +}") + +(define_expand "vec_unpacku_float_lo_v8hi" + [(set (match_operand:V4SF 0 "register_operand" "") + (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "")] + UNSPEC_VUPKLU_V4SF))] + "TARGET_ALTIVEC" + " +{ + rtx tmp = gen_reg_rtx (V4SImode); + + emit_insn (gen_vec_unpacku_lo_v8hi (tmp, operands[1])); + emit_insn (gen_altivec_vcfux (operands[0], tmp, const0_rtx)); + DONE; +}") diff --git a/gcc/config/rs6000/biarch64.h b/gcc/config/rs6000/biarch64.h new file mode 100644 index 000000000..29e5b029b --- /dev/null +++ b/gcc/config/rs6000/biarch64.h @@ -0,0 +1,26 @@ +/* Definitions of 
target machine for GNU compiler, for 32/64 bit powerpc. + Copyright (C) 2003, 2007, 2009 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* Specify this in a cover file to provide bi-architecture (32/64) support. */ +#define RS6000_BI_ARCH 1 diff --git a/gcc/config/rs6000/cell.md b/gcc/config/rs6000/cell.md new file mode 100644 index 000000000..dac9da943 --- /dev/null +++ b/gcc/config/rs6000/cell.md @@ -0,0 +1,400 @@ +;; Scheduling description for cell processor. +;; Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2009 +;; Free Software Foundation, Inc. +;; Contributed by Sony Computer Entertainment, Inc., + + +;; This file is free software; you can redistribute it and/or modify it under +;; the terms of the GNU General Public License as published by the Free +;; Software Foundation; either version 3 of the License, or (at your option) +;; any later version. + +;; This file is distributed in the hope that it will be useful, but WITHOUT +;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +;; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +;; for more details. 
+ +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . + +;; Sources: BE BOOK4 (/sfs/enc/doc/PPU_BookIV_DD3.0_latest.pdf) + +;; BE Architecture *DD3.0 and DD3.1* +;; This file simulate PPU processor unit backend of pipeline, maualP24. +;; manual P27, stall and flush points +;; IU, XU, VSU, dispatcher decodes and dispatch 2 insns per cycle in program +;; order, the grouped address are aligned by 8 +;; This file only simulate one thread situation +;; XU executes all fixed point insns(3 units, a simple alu, a complex unit, +;; and load/store unit) +;; VSU executes all scalar floating points insn(a float unit), +;; VMX insns(VMX unit, 4 sub units, simple, permute, complex, floating point) + +;; Dual issue combination + +;; FXU LSU BR VMX VMX +;; (sx,cx,vsu_fp,fp_arith) (perm,vsu_ls,fp_ls) +;;FXU X +;;LSU X X X +;;BR X +;;VMX(sx,cx,vsu_fp,fp_arth) X +;;VMX(perm,vsu_ls, fp_ls) X +;; X are illegal combination. + +;; Dual issue exceptions: +;;(1) nop-pipelined FXU instr in slot 0 +;;(2) non-pipelined FPU inst in slot 0 +;; CSI instr(contex-synchronizing insn) +;; Microcode insn + +;; BRU unit: bru(none register stall), bru_cr(cr register stall) +;; VSU unit: vus(vmx simple), vup(vmx permute), vuc(vmx complex), +;; vuf(vmx float), fpu(floats). fpu_div is hypothetical, it is for +;; nonpipelined simulation +;; micr insns will stall at least 7 cycles to get the first instr from ROM, +;; micro instructions are not dual issued. 
+ +;; slot0 is older than slot1 +;; non-pipelined insn need to be in slot1 to avoid 1cycle stall + +;; There different stall point +;; IB2, only stall one thread if stall here, so try to stall here as much as +;; we can +;; condition(1) insert nop, OR and ORI instruction form +;; condition(2) flush happens, in case of: RAW, WAW, D-ERAT miss, or +;; CR0-access while stdcx, or stwcx +;; IS2 stall ;; Page91 for details +;; VQ8 stall +;; IS2 stall can be activated by VQ8 stall and trying to issue a vsu instr to +;; the vsu issue queue + +;;(define_automaton "cellxu") + +;;(define_cpu_unit "fxu_cell,lsu_cell,bru_cell,vsu1_cell,vsu2_cell" "cellxu") + +;; ndfa +(define_automaton "cellxu,cellvsu,cellbru,cell_mis") + +(define_cpu_unit "fxu_cell,lsu_cell" "cellxu") +(define_cpu_unit "bru_cell" "cellbru") +(define_cpu_unit "vsu1_cell,vsu2_cell" "cellvsu") + +(define_cpu_unit "slot0,slot1" "cell_mis") + +(absence_set "slot0" "slot1") + +(define_reservation "nonpipeline" "fxu_cell+lsu_cell+vsu1_cell+vsu2_cell") +(define_reservation "slot01" "slot0|slot1") + + +;; Load/store +;; lmw, lswi, lswx are only generated for optimize for space, MC, +;; these instr are not simulated +(define_insn_reservation "cell-load" 2 + (and (eq_attr "type" "load") + (eq_attr "cpu" "cell")) + "slot01,lsu_cell") + +;; ldux, ldu, lbzux, lbzu, hardware breaks it down to two instrs, +;; if with 32bytes alignment, CMC +(define_insn_reservation "cell-load-ux" 2 + (and (eq_attr "type" "load_ux,load_u") + (eq_attr "cpu" "cell")) + "slot01,fxu_cell+lsu_cell") + +;; lha, lhax, lhau, lhaux, lwa, lwax, lwaux, MC, latency unknown +;; 11/7, 11/8, 11/12 +(define_insn_reservation "cell-load-ext" 2 + (and (eq_attr "type" "load_ext,load_ext_u,load_ext_ux") + (eq_attr "cpu" "cell")) + "slot01,fxu_cell+lsu_cell") + +;;lfs,lfsx,lfd,lfdx, 1 cycle +(define_insn_reservation "cell-fpload" 1 + (and (eq_attr "type" "fpload") + (eq_attr "cpu" "cell")) + "vsu2_cell+lsu_cell+slot01") + +;; lfsu,lfsux,lfdu,lfdux 1cycle(fpr) 2 
cycle(gpr) +(define_insn_reservation "cell-fpload-update" 1 + (and (eq_attr "type" "fpload,fpload_u,fpload_ux") + (eq_attr "cpu" "cell")) + "fxu_cell+vsu2_cell+lsu_cell+slot01") + +(define_insn_reservation "cell-vecload" 2 + (and (eq_attr "type" "vecload") + (eq_attr "cpu" "cell")) + "slot01,vsu2_cell+lsu_cell") + +;;st? stw(MC) +(define_insn_reservation "cell-store" 1 + (and (eq_attr "type" "store") + (eq_attr "cpu" "cell")) + "lsu_cell+slot01") + +;;stdux, stdu, (hardware breaks into store and add) 2 for update reg +(define_insn_reservation "cell-store-update" 1 + (and (eq_attr "type" "store_ux,store_u") + (eq_attr "cpu" "cell")) + "fxu_cell+lsu_cell+slot01") + +(define_insn_reservation "cell-fpstore" 1 + (and (eq_attr "type" "fpstore") + (eq_attr "cpu" "cell")) + "vsu2_cell+lsu_cell+slot01") + +(define_insn_reservation "cell-fpstore-update" 1 + (and (eq_attr "type" "fpstore_ux,fpstore_u") + (eq_attr "cpu" "cell")) + "vsu2_cell+fxu_cell+lsu_cell+slot01") + +(define_insn_reservation "cell-vecstore" 1 + (and (eq_attr "type" "vecstore") + (eq_attr "cpu" "cell")) + "vsu2_cell+lsu_cell+slot01") + +;; Integer latency is 2 cycles +(define_insn_reservation "cell-integer" 2 + (and (eq_attr "type" "integer,insert_dword,shift,trap,\ + var_shift_rotate,cntlz,exts,isel") + (eq_attr "cpu" "cell")) + "slot01,fxu_cell") + +;; Two integer latency is 4 cycles +(define_insn_reservation "cell-two" 4 + (and (eq_attr "type" "two") + (eq_attr "cpu" "cell")) + "slot01,fxu_cell,fxu_cell*2") + +;; Three integer latency is 6 cycles +(define_insn_reservation "cell-three" 6 + (and (eq_attr "type" "three") + (eq_attr "cpu" "cell")) + "slot01,fxu_cell,fxu_cell*4") + +;; rlwimi, alter cr0 +(define_insn_reservation "cell-insert" 2 + (and (eq_attr "type" "insert_word") + (eq_attr "cpu" "cell")) + "slot01,fxu_cell") + +;; cmpi, cmpli, cmpla, add, addo, sub, subo, alter cr0 +(define_insn_reservation "cell-cmp" 1 + (and (eq_attr "type" "cmp") + (eq_attr "cpu" "cell")) + "fxu_cell+slot01") + +;; add, 
addo, sub, subo, alter cr0, rldcli, rlwinm +(define_insn_reservation "cell-fast-cmp" 2 + (and (and (eq_attr "type" "fast_compare,delayed_compare,compare,\ + var_delayed_compare") + (eq_attr "cpu" "cell")) + (eq_attr "cell_micro" "not")) + "slot01,fxu_cell") + +(define_insn_reservation "cell-cmp-microcoded" 9 + (and (and (eq_attr "type" "fast_compare,delayed_compare,compare,\ + var_delayed_compare") + (eq_attr "cpu" "cell")) + (eq_attr "cell_micro" "always")) + "slot0+slot1,fxu_cell,fxu_cell*7") + +;; mulld +(define_insn_reservation "cell-lmul" 15 + (and (eq_attr "type" "lmul") + (eq_attr "cpu" "cell")) + "slot1,nonpipeline,nonpipeline*13") + +;; mulld. is microcoded +(define_insn_reservation "cell-lmul-cmp" 22 + (and (eq_attr "type" "lmul_compare") + (eq_attr "cpu" "cell")) + "slot0+slot1,nonpipeline,nonpipeline*20") + +;; mulli, 6 cycles +(define_insn_reservation "cell-imul23" 6 + (and (eq_attr "type" "imul2,imul3") + (eq_attr "cpu" "cell")) + "slot1,nonpipeline,nonpipeline*4") + +;; mullw, 9 +(define_insn_reservation "cell-imul" 9 + (and (eq_attr "type" "imul") + (eq_attr "cpu" "cell")) + "slot1,nonpipeline,nonpipeline*7") + +;; divide +(define_insn_reservation "cell-idiv" 32 + (and (eq_attr "type" "idiv") + (eq_attr "cpu" "cell")) + "slot1,nonpipeline,nonpipeline*30") + +(define_insn_reservation "cell-ldiv" 64 + (and (eq_attr "type" "ldiv") + (eq_attr "cpu" "cell")) + "slot1,nonpipeline,nonpipeline*62") + +;;mflr and mfctr are pipelined +(define_insn_reservation "cell-mfjmpr" 1 + (and (eq_attr "type" "mfjmpr") + (eq_attr "cpu" "cell")) + "slot01+bru_cell") + +;;mtlr and mtctr, +;;mtspr fully pipelined +(define_insn_reservation "cell-mtjmpr" 1 + (and (eq_attr "type" "mtjmpr") + (eq_attr "cpu" "cell")) + "bru_cell+slot01") + +;; Branches +;; b, ba, bl, bla, unconditional branch always predicts correctly n/a latency +;; bcctr, bcctrl, latency 2, actually adjust by be to 4 +(define_insn_reservation "cell-branch" 1 + (and (eq_attr "type" "branch") + (eq_attr "cpu" 
"cell")) + "bru_cell+slot1") + +(define_insn_reservation "cell-branchreg" 1 + (and (eq_attr "type" "jmpreg") + (eq_attr "cpu" "cell")) + "bru_cell+slot1") + +;; cr hazard +;; page 90, special cases for CR hazard, only one instr can access cr per cycle +;; if insn reads CR following a stwcx, pipeline stall till stwcx finish +(define_insn_reservation "cell-crlogical" 1 + (and (eq_attr "type" "cr_logical,delayed_cr") + (eq_attr "cpu" "cell")) + "bru_cell+slot01") + +;; mfcrf and mfcr are about 34 cycles and nonpipelined +(define_insn_reservation "cell-mfcr" 34 + (and (eq_attr "type" "mfcrf,mfcr") + (eq_attr "cpu" "cell")) + "slot1,nonpipeline,nonpipeline*32") + +;; mtcrf (1 field) +(define_insn_reservation "cell-mtcrf" 1 + (and (eq_attr "type" "mtcr") + (eq_attr "cpu" "cell")) + "fxu_cell+slot01") + +; Basic FP latency is 10 cycles, throughput is 1/cycle +(define_insn_reservation "cell-fp" 10 + (and (eq_attr "type" "fp,dmul") + (eq_attr "cpu" "cell")) + "slot01,vsu1_cell,vsu1_cell*8") + +(define_insn_reservation "cell-fpcompare" 1 + (and (eq_attr "type" "fpcompare") + (eq_attr "cpu" "cell")) + "vsu1_cell+slot01") + +;; sdiv throughput 1/74, not pipelined but only in the FPU +(define_insn_reservation "cell-sdiv" 74 + (and (eq_attr "type" "sdiv,ddiv") + (eq_attr "cpu" "cell")) + "slot1,nonpipeline,nonpipeline*72") + +;; fsqrt throughput 1/84, not pipelined but only in the FPU +(define_insn_reservation "cell-sqrt" 84 + (and (eq_attr "type" "ssqrt,dsqrt") + (eq_attr "cpu" "cell")) + "slot1,nonpipeline,nonpipeline*82") + +; VMX +(define_insn_reservation "cell-vecsimple" 4 + (and (eq_attr "type" "vecsimple") + (eq_attr "cpu" "cell")) + "slot01,vsu1_cell,vsu1_cell*2") + +;; mult, div, madd +(define_insn_reservation "cell-veccomplex" 10 + (and (eq_attr "type" "veccomplex") + (eq_attr "cpu" "cell")) + "slot01,vsu1_cell,vsu1_cell*8") + +;; TODO: add support for recording instructions +(define_insn_reservation "cell-veccmp" 4 + (and (eq_attr "type" "veccmp") + (eq_attr "cpu" 
"cell")) + "slot01,vsu1_cell,vsu1_cell*2") + +(define_insn_reservation "cell-vecfloat" 12 + (and (eq_attr "type" "vecfloat") + (eq_attr "cpu" "cell")) + "slot01,vsu1_cell,vsu1_cell*10") + +(define_insn_reservation "cell-vecperm" 4 + (and (eq_attr "type" "vecperm") + (eq_attr "cpu" "cell")) + "slot01,vsu2_cell,vsu2_cell*2") + +;; New for 4.2, syncs + +(define_insn_reservation "cell-sync" 11 + (and (eq_attr "type" "sync") + (eq_attr "cpu" "cell")) + "slot01,lsu_cell,lsu_cell*9") + +(define_insn_reservation "cell-isync" 11 + (and (eq_attr "type" "isync") + (eq_attr "cpu" "cell")) + "slot01,lsu_cell,lsu_cell*9") + +(define_insn_reservation "cell-load_l" 11 + (and (eq_attr "type" "load_l") + (eq_attr "cpu" "cell")) + "slot01,lsu_cell,lsu_cell*9") + +(define_insn_reservation "cell-store_c" 11 + (and (eq_attr "type" "store_c") + (eq_attr "cpu" "cell")) + "slot01,lsu_cell,lsu_cell*9") + +;; RAW register dependency + +;; addi r3, r3, 1 +;; lw r4,offset(r3) +;; there is a 5 cycle delay for r3 bypassing +;; there is a 5 cycle delay for a dependent load after a load +(define_bypass 5 "cell-integer" "cell-load") +(define_bypass 5 "cell-integer" "cell-load-ext") +(define_bypass 5 "cell-load,cell-load-ext" "cell-load,cell-load-ext") + +;; there is a 6 cycle delay after a fp compare until you can use the cr. 
+(define_bypass 6 "cell-fpcompare" "cell-branch,cell-branchreg,cell-mfcr,cell-crlogical") + +;; VXU float RAW +(define_bypass 11 "cell-vecfloat" "cell-vecfloat") + +;; VXU and FPU +(define_bypass 6 "cell-veccomplex" "cell-vecsimple") +;;(define_bypass 6 "cell-veccompare" "cell-branch,cell-branchreg") +(define_bypass 3 "cell-vecfloat" "cell-veccomplex") +; this is not correct, +;; this is a stall in general and not dependent on result +(define_bypass 13 "cell-vecstore" "cell-fpstore") +; this is not correct, this can never be true, not dependent on result +(define_bypass 7 "cell-fp" "cell-fpload") +;; vsu1 should avoid writing to the same target register as vsu2 insn +;; within 12 cycles. + +;; WAW hazard + +;; the target of VSU estimate should not be reused within 10 dispatch groups +;; the target of VSU float should not be reused within 8 dispatch groups +;; the target of VSU complex should not be reused within 5 dispatch groups +;; FP LOAD should not reuse an FPU Arithmetic target within 6 dispatch groups + +;; mtctr-bcctr/bcctrl, branch target ctr register shadow update at +;; ex4 stage(10 cycles) +(define_bypass 10 "cell-mtjmpr" "cell-branchreg") + +;;Things are not simulated: +;; update instruction, update address gpr are not simulated +;; vrefp, vrsqrtefp have latency(14), currently simulated as 12 cycle float +;; insns + diff --git a/gcc/config/rs6000/constraints.md b/gcc/config/rs6000/constraints.md new file mode 100644 index 000000000..71b3b207e --- /dev/null +++ b/gcc/config/rs6000/constraints.md @@ -0,0 +1,201 @@ +;; Constraint definitions for RS6000 +;; Copyright (C) 2006, 2007, 2009, 2010 Free Software Foundation, Inc. +;; +;; This file is part of GCC. +;; +;; GCC is free software; you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation; either version 3, or (at your option) +;; any later version. 
+;; +;; GCC is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . + +;; Available constraint letters: "e", "k", "u", "A", "B", "C", "D" + +;; Register constraints + +(define_register_constraint "f" "rs6000_constraints[RS6000_CONSTRAINT_f]" + "@internal") + +(define_register_constraint "d" "rs6000_constraints[RS6000_CONSTRAINT_d]" + "@internal") + +(define_register_constraint "b" "BASE_REGS" + "@internal") + +(define_register_constraint "h" "SPECIAL_REGS" + "@internal") + +(define_register_constraint "q" "MQ_REGS" + "@internal") + +(define_register_constraint "c" "CTR_REGS" + "@internal") + +(define_register_constraint "l" "LINK_REGS" + "@internal") + +(define_register_constraint "v" "ALTIVEC_REGS" + "@internal") + +(define_register_constraint "x" "CR0_REGS" + "@internal") + +(define_register_constraint "y" "CR_REGS" + "@internal") + +(define_register_constraint "z" "CA_REGS" + "@internal") + +;; Use w as a prefix to add VSX modes +;; vector double (V2DF) +(define_register_constraint "wd" "rs6000_constraints[RS6000_CONSTRAINT_wd]" + "@internal") + +;; vector float (V4SF) +(define_register_constraint "wf" "rs6000_constraints[RS6000_CONSTRAINT_wf]" + "@internal") + +;; scalar double (DF) +(define_register_constraint "ws" "rs6000_constraints[RS6000_CONSTRAINT_ws]" + "@internal") + +;; any VSX register +(define_register_constraint "wa" "rs6000_constraints[RS6000_CONSTRAINT_wa]" + "@internal") + +;; Altivec style load/store that ignores the bottom bits of the address +(define_memory_constraint "wZ" + "Indexed or indirect memory operand, ignoring the bottom 4 bits" + (match_operand 0 "altivec_indexed_or_indirect_operand")) + +;; Integer constraints + 
+(define_constraint "I" + "A signed 16-bit constant" + (and (match_code "const_int") + (match_test "(unsigned HOST_WIDE_INT) (ival + 0x8000) < 0x10000"))) + +(define_constraint "J" + "high-order 16 bits nonzero" + (and (match_code "const_int") + (match_test "(ival & (~ (unsigned HOST_WIDE_INT) 0xffff0000)) == 0"))) + +(define_constraint "K" + "low-order 16 bits nonzero" + (and (match_code "const_int") + (match_test "(ival & (~ (HOST_WIDE_INT) 0xffff)) == 0"))) + +(define_constraint "L" + "signed 16-bit constant shifted left 16 bits" + (and (match_code "const_int") + (match_test "((ival & 0xffff) == 0 + && (ival >> 31 == -1 || ival >> 31 == 0))"))) + +(define_constraint "M" + "constant greater than 31" + (and (match_code "const_int") + (match_test "ival > 31"))) + +(define_constraint "N" + "positive constant that is an exact power of two" + (and (match_code "const_int") + (match_test "ival > 0 && exact_log2 (ival) >= 0"))) + +(define_constraint "O" + "constant zero" + (and (match_code "const_int") + (match_test "ival == 0"))) + +(define_constraint "P" + "constant whose negation is signed 16-bit constant" + (and (match_code "const_int") + (match_test "(unsigned HOST_WIDE_INT) ((- ival) + 0x8000) < 0x10000"))) + +;; Floating-point constraints + +(define_constraint "G" + "Constant that can be copied into GPR with two insns for DF/DI + and one for SF." + (and (match_code "const_double") + (match_test "num_insns_constant (op, mode) + == (mode == SFmode ? 1 : 2)"))) + +(define_constraint "H" + "DF/DI constant that takes three insns." + (and (match_code "const_double") + (match_test "num_insns_constant (op, mode) == 3"))) + +;; Memory constraints + +(define_memory_constraint "es" + "A ``stable'' memory operand; that is, one which does not include any +automodification of the base register. Unlike @samp{m}, this constraint +can be used in @code{asm} statements that might access the operand +several times, or that might not access it at all." 
+ (and (match_code "mem") + (match_test "GET_RTX_CLASS (GET_CODE (XEXP (op, 0))) != RTX_AUTOINC"))) + +(define_memory_constraint "Q" + "Memory operand that is an offset from a register (it is usually better +to use @samp{m} or @samp{es} in @code{asm} statements)" + (and (match_code "mem") + (match_test "GET_CODE (XEXP (op, 0)) == REG"))) + +(define_memory_constraint "Y" + "Indexed or word-aligned displacement memory operand" + (match_operand 0 "word_offset_memref_operand")) + +(define_memory_constraint "Z" + "Memory operand that is an indexed or indirect from a register (it is +usually better to use @samp{m} or @samp{es} in @code{asm} statements)" + (match_operand 0 "indexed_or_indirect_operand")) + +;; Address constraints + +(define_address_constraint "a" + "Indexed or indirect address operand" + (match_operand 0 "indexed_or_indirect_address")) + +(define_constraint "R" + "AIX TOC entry" + (match_test "legitimate_constant_pool_address_p (op, QImode, false)")) + +;; General constraints + +(define_constraint "S" + "Constant that can be placed into a 64-bit mask operand" + (match_operand 0 "mask64_operand")) + +(define_constraint "T" + "Constant that can be placed into a 32-bit mask operand" + (match_operand 0 "mask_operand")) + +(define_constraint "U" + "V.4 small data reference" + (and (match_test "DEFAULT_ABI == ABI_V4") + (match_operand 0 "small_data_operand"))) + +(define_constraint "t" + "AND masks that can be performed by two rldic{l,r} insns + (but excluding those that could match other constraints of anddi3)" + (and (and (and (match_operand 0 "mask64_2_operand") + (match_test "(fixed_regs[CR0_REGNO] + || !logical_operand (op, DImode))")) + (not (match_operand 0 "mask_operand"))) + (not (match_operand 0 "mask64_operand")))) + +(define_constraint "W" + "vector constant that does not require memory" + (match_operand 0 "easy_vector_constant")) + +(define_constraint "j" + "Zero vector constant" + (match_test "(op == const0_rtx || op == CONST0_RTX (GET_MODE 
(op)))")) diff --git a/gcc/config/rs6000/crtresfpr.asm b/gcc/config/rs6000/crtresfpr.asm new file mode 100644 index 000000000..9fb228cf4 --- /dev/null +++ b/gcc/config/rs6000/crtresfpr.asm @@ -0,0 +1,81 @@ +/* + * Special support for eabi and SVR4 + * + * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008, 2009 + * Free Software Foundation, Inc. + * Written By Michael Meissner + * 64-bit support written by David Edelsohn + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * . + */ + +/* Do any initializations needed for the eabi environment */ + + .section ".text" + #include "ppc-asm.h" + +/* On PowerPC64 Linux, these functions are provided by the linker. */ +#ifndef __powerpc64__ + +/* Routines for restoring floating point registers, called by the compiler. */ +/* Called with r11 pointing to the stack header word of the caller of the */ +/* function, just beyond the end of the floating point save area. 
*/ + +CFI_STARTPROC +HIDDEN_FUNC(_restfpr_14) lfd 14,-144(11) /* restore fp registers */ +HIDDEN_FUNC(_restfpr_15) lfd 15,-136(11) +HIDDEN_FUNC(_restfpr_16) lfd 16,-128(11) +HIDDEN_FUNC(_restfpr_17) lfd 17,-120(11) +HIDDEN_FUNC(_restfpr_18) lfd 18,-112(11) +HIDDEN_FUNC(_restfpr_19) lfd 19,-104(11) +HIDDEN_FUNC(_restfpr_20) lfd 20,-96(11) +HIDDEN_FUNC(_restfpr_21) lfd 21,-88(11) +HIDDEN_FUNC(_restfpr_22) lfd 22,-80(11) +HIDDEN_FUNC(_restfpr_23) lfd 23,-72(11) +HIDDEN_FUNC(_restfpr_24) lfd 24,-64(11) +HIDDEN_FUNC(_restfpr_25) lfd 25,-56(11) +HIDDEN_FUNC(_restfpr_26) lfd 26,-48(11) +HIDDEN_FUNC(_restfpr_27) lfd 27,-40(11) +HIDDEN_FUNC(_restfpr_28) lfd 28,-32(11) +HIDDEN_FUNC(_restfpr_29) lfd 29,-24(11) +HIDDEN_FUNC(_restfpr_30) lfd 30,-16(11) +HIDDEN_FUNC(_restfpr_31) lfd 31,-8(11) + blr +FUNC_END(_restfpr_31) +FUNC_END(_restfpr_30) +FUNC_END(_restfpr_29) +FUNC_END(_restfpr_28) +FUNC_END(_restfpr_27) +FUNC_END(_restfpr_26) +FUNC_END(_restfpr_25) +FUNC_END(_restfpr_24) +FUNC_END(_restfpr_23) +FUNC_END(_restfpr_22) +FUNC_END(_restfpr_21) +FUNC_END(_restfpr_20) +FUNC_END(_restfpr_19) +FUNC_END(_restfpr_18) +FUNC_END(_restfpr_17) +FUNC_END(_restfpr_16) +FUNC_END(_restfpr_15) +FUNC_END(_restfpr_14) +CFI_ENDPROC + +#endif diff --git a/gcc/config/rs6000/crtresgpr.asm b/gcc/config/rs6000/crtresgpr.asm new file mode 100644 index 000000000..9f9cec9f9 --- /dev/null +++ b/gcc/config/rs6000/crtresgpr.asm @@ -0,0 +1,81 @@ +/* + * Special support for eabi and SVR4 + * + * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008, 2009 + * Free Software Foundation, Inc. + * Written By Michael Meissner + * 64-bit support written by David Edelsohn + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. 
+ * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * . + */ + +/* Do any initializations needed for the eabi environment */ + + .section ".text" + #include "ppc-asm.h" + +/* On PowerPC64 Linux, these functions are provided by the linker. */ +#ifndef __powerpc64__ + +/* Routines for restoring integer registers, called by the compiler. */ +/* Called with r11 pointing to the stack header word of the caller of the */ +/* function, just beyond the end of the integer restore area. 
*/ + +CFI_STARTPROC +HIDDEN_FUNC(_restgpr_14) lwz 14,-72(11) /* restore gp registers */ +HIDDEN_FUNC(_restgpr_15) lwz 15,-68(11) +HIDDEN_FUNC(_restgpr_16) lwz 16,-64(11) +HIDDEN_FUNC(_restgpr_17) lwz 17,-60(11) +HIDDEN_FUNC(_restgpr_18) lwz 18,-56(11) +HIDDEN_FUNC(_restgpr_19) lwz 19,-52(11) +HIDDEN_FUNC(_restgpr_20) lwz 20,-48(11) +HIDDEN_FUNC(_restgpr_21) lwz 21,-44(11) +HIDDEN_FUNC(_restgpr_22) lwz 22,-40(11) +HIDDEN_FUNC(_restgpr_23) lwz 23,-36(11) +HIDDEN_FUNC(_restgpr_24) lwz 24,-32(11) +HIDDEN_FUNC(_restgpr_25) lwz 25,-28(11) +HIDDEN_FUNC(_restgpr_26) lwz 26,-24(11) +HIDDEN_FUNC(_restgpr_27) lwz 27,-20(11) +HIDDEN_FUNC(_restgpr_28) lwz 28,-16(11) +HIDDEN_FUNC(_restgpr_29) lwz 29,-12(11) +HIDDEN_FUNC(_restgpr_30) lwz 30,-8(11) +HIDDEN_FUNC(_restgpr_31) lwz 31,-4(11) + blr +FUNC_END(_restgpr_31) +FUNC_END(_restgpr_30) +FUNC_END(_restgpr_29) +FUNC_END(_restgpr_28) +FUNC_END(_restgpr_27) +FUNC_END(_restgpr_26) +FUNC_END(_restgpr_25) +FUNC_END(_restgpr_24) +FUNC_END(_restgpr_23) +FUNC_END(_restgpr_22) +FUNC_END(_restgpr_21) +FUNC_END(_restgpr_20) +FUNC_END(_restgpr_19) +FUNC_END(_restgpr_18) +FUNC_END(_restgpr_17) +FUNC_END(_restgpr_16) +FUNC_END(_restgpr_15) +FUNC_END(_restgpr_14) +CFI_ENDPROC + +#endif diff --git a/gcc/config/rs6000/crtresxfpr.asm b/gcc/config/rs6000/crtresxfpr.asm new file mode 100644 index 000000000..633f2db61 --- /dev/null +++ b/gcc/config/rs6000/crtresxfpr.asm @@ -0,0 +1,126 @@ +/* + * Special support for eabi and SVR4 + * + * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008, 2009 + * Free Software Foundation, Inc. + * Written By Michael Meissner + * 64-bit support written by David Edelsohn + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. 
+ * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * . + */ + +/* Do any initializations needed for the eabi environment */ + + .section ".text" + #include "ppc-asm.h" + +/* On PowerPC64 Linux, these functions are provided by the linker. */ +#ifndef __powerpc64__ + +/* Routines for restoring floating point registers, called by the compiler. */ +/* Called with r11 pointing to the stack header word of the caller of the */ +/* function, just beyond the end of the floating point save area. 
*/ +/* In addition to restoring the fp registers, it will return to the caller's */ +/* caller */ + +CFI_STARTPROC +CFI_DEF_CFA_REGISTER (11) +CFI_OFFSET (65, 4) +CFI_OFFSET (46, -144) +CFI_OFFSET (47, -136) +CFI_OFFSET (48, -128) +CFI_OFFSET (49, -120) +CFI_OFFSET (50, -112) +CFI_OFFSET (51, -104) +CFI_OFFSET (52, -96) +CFI_OFFSET (53, -88) +CFI_OFFSET (54, -80) +CFI_OFFSET (55, -72) +CFI_OFFSET (56, -64) +CFI_OFFSET (57, -56) +CFI_OFFSET (58, -48) +CFI_OFFSET (59, -40) +CFI_OFFSET (60, -32) +CFI_OFFSET (61, -24) +CFI_OFFSET (62, -16) +CFI_OFFSET (63, -8) +HIDDEN_FUNC(_restfpr_14_x) lfd 14,-144(11) /* restore fp registers */ +CFI_RESTORE (46) +HIDDEN_FUNC(_restfpr_15_x) lfd 15,-136(11) +CFI_RESTORE (47) +HIDDEN_FUNC(_restfpr_16_x) lfd 16,-128(11) +CFI_RESTORE (48) +HIDDEN_FUNC(_restfpr_17_x) lfd 17,-120(11) +CFI_RESTORE (49) +HIDDEN_FUNC(_restfpr_18_x) lfd 18,-112(11) +CFI_RESTORE (50) +HIDDEN_FUNC(_restfpr_19_x) lfd 19,-104(11) +CFI_RESTORE (51) +HIDDEN_FUNC(_restfpr_20_x) lfd 20,-96(11) +CFI_RESTORE (52) +HIDDEN_FUNC(_restfpr_21_x) lfd 21,-88(11) +CFI_RESTORE (53) +HIDDEN_FUNC(_restfpr_22_x) lfd 22,-80(11) +CFI_RESTORE (54) +HIDDEN_FUNC(_restfpr_23_x) lfd 23,-72(11) +CFI_RESTORE (55) +HIDDEN_FUNC(_restfpr_24_x) lfd 24,-64(11) +CFI_RESTORE (56) +HIDDEN_FUNC(_restfpr_25_x) lfd 25,-56(11) +CFI_RESTORE (57) +HIDDEN_FUNC(_restfpr_26_x) lfd 26,-48(11) +CFI_RESTORE (58) +HIDDEN_FUNC(_restfpr_27_x) lfd 27,-40(11) +CFI_RESTORE (59) +HIDDEN_FUNC(_restfpr_28_x) lfd 28,-32(11) +CFI_RESTORE (60) +HIDDEN_FUNC(_restfpr_29_x) lfd 29,-24(11) +CFI_RESTORE (61) +HIDDEN_FUNC(_restfpr_30_x) lfd 30,-16(11) +CFI_RESTORE (62) +HIDDEN_FUNC(_restfpr_31_x) lwz 0,4(11) + lfd 31,-8(11) +CFI_RESTORE (63) + mtlr 0 +CFI_RESTORE (65) + mr 1,11 +CFI_DEF_CFA_REGISTER (1) + blr +FUNC_END(_restfpr_31_x) +FUNC_END(_restfpr_30_x) +FUNC_END(_restfpr_29_x) +FUNC_END(_restfpr_28_x) +FUNC_END(_restfpr_27_x) +FUNC_END(_restfpr_26_x) +FUNC_END(_restfpr_25_x) +FUNC_END(_restfpr_24_x) 
+FUNC_END(_restfpr_23_x) +FUNC_END(_restfpr_22_x) +FUNC_END(_restfpr_21_x) +FUNC_END(_restfpr_20_x) +FUNC_END(_restfpr_19_x) +FUNC_END(_restfpr_18_x) +FUNC_END(_restfpr_17_x) +FUNC_END(_restfpr_16_x) +FUNC_END(_restfpr_15_x) +FUNC_END(_restfpr_14_x) +CFI_ENDPROC + +#endif diff --git a/gcc/config/rs6000/crtresxgpr.asm b/gcc/config/rs6000/crtresxgpr.asm new file mode 100644 index 000000000..451b2b69d --- /dev/null +++ b/gcc/config/rs6000/crtresxgpr.asm @@ -0,0 +1,124 @@ +/* + * Special support for eabi and SVR4 + * + * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008, 2009 + * Free Software Foundation, Inc. + * Written By Michael Meissner + * 64-bit support written by David Edelsohn + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * . + */ + +/* Do any initializations needed for the eabi environment */ + + .section ".text" + #include "ppc-asm.h" + +/* On PowerPC64 Linux, these functions are provided by the linker. */ +#ifndef __powerpc64__ + +/* Routines for restoring integer registers, called by the compiler. 
*/ +/* Called with r11 pointing to the stack header word of the caller of the */ +/* function, just beyond the end of the integer restore area. */ + +CFI_STARTPROC +CFI_DEF_CFA_REGISTER (11) +CFI_OFFSET (65, 4) +CFI_OFFSET (14, -72) +CFI_OFFSET (15, -68) +CFI_OFFSET (16, -64) +CFI_OFFSET (17, -60) +CFI_OFFSET (18, -56) +CFI_OFFSET (19, -52) +CFI_OFFSET (20, -48) +CFI_OFFSET (21, -44) +CFI_OFFSET (22, -40) +CFI_OFFSET (23, -36) +CFI_OFFSET (24, -32) +CFI_OFFSET (25, -28) +CFI_OFFSET (26, -24) +CFI_OFFSET (27, -20) +CFI_OFFSET (28, -16) +CFI_OFFSET (29, -12) +CFI_OFFSET (30, -8) +CFI_OFFSET (31, -4) +HIDDEN_FUNC(_restgpr_14_x) lwz 14,-72(11) /* restore gp registers */ +CFI_RESTORE (14) +HIDDEN_FUNC(_restgpr_15_x) lwz 15,-68(11) +CFI_RESTORE (15) +HIDDEN_FUNC(_restgpr_16_x) lwz 16,-64(11) +CFI_RESTORE (16) +HIDDEN_FUNC(_restgpr_17_x) lwz 17,-60(11) +CFI_RESTORE (17) +HIDDEN_FUNC(_restgpr_18_x) lwz 18,-56(11) +CFI_RESTORE (18) +HIDDEN_FUNC(_restgpr_19_x) lwz 19,-52(11) +CFI_RESTORE (19) +HIDDEN_FUNC(_restgpr_20_x) lwz 20,-48(11) +CFI_RESTORE (20) +HIDDEN_FUNC(_restgpr_21_x) lwz 21,-44(11) +CFI_RESTORE (21) +HIDDEN_FUNC(_restgpr_22_x) lwz 22,-40(11) +CFI_RESTORE (22) +HIDDEN_FUNC(_restgpr_23_x) lwz 23,-36(11) +CFI_RESTORE (23) +HIDDEN_FUNC(_restgpr_24_x) lwz 24,-32(11) +CFI_RESTORE (24) +HIDDEN_FUNC(_restgpr_25_x) lwz 25,-28(11) +CFI_RESTORE (25) +HIDDEN_FUNC(_restgpr_26_x) lwz 26,-24(11) +CFI_RESTORE (26) +HIDDEN_FUNC(_restgpr_27_x) lwz 27,-20(11) +CFI_RESTORE (27) +HIDDEN_FUNC(_restgpr_28_x) lwz 28,-16(11) +CFI_RESTORE (28) +HIDDEN_FUNC(_restgpr_29_x) lwz 29,-12(11) +CFI_RESTORE (29) +HIDDEN_FUNC(_restgpr_30_x) lwz 30,-8(11) +CFI_RESTORE (30) +HIDDEN_FUNC(_restgpr_31_x) lwz 0,4(11) + lwz 31,-4(11) +CFI_RESTORE (31) + mtlr 0 +CFI_RESTORE (65) + mr 1,11 +CFI_DEF_CFA_REGISTER (1) + blr +FUNC_END(_restgpr_31_x) +FUNC_END(_restgpr_30_x) +FUNC_END(_restgpr_29_x) +FUNC_END(_restgpr_28_x) +FUNC_END(_restgpr_27_x) +FUNC_END(_restgpr_26_x) +FUNC_END(_restgpr_25_x) 
+FUNC_END(_restgpr_24_x) +FUNC_END(_restgpr_23_x) +FUNC_END(_restgpr_22_x) +FUNC_END(_restgpr_21_x) +FUNC_END(_restgpr_20_x) +FUNC_END(_restgpr_19_x) +FUNC_END(_restgpr_18_x) +FUNC_END(_restgpr_17_x) +FUNC_END(_restgpr_16_x) +FUNC_END(_restgpr_15_x) +FUNC_END(_restgpr_14_x) +CFI_ENDPROC + +#endif diff --git a/gcc/config/rs6000/crtsavfpr.asm b/gcc/config/rs6000/crtsavfpr.asm new file mode 100644 index 000000000..3cdb25033 --- /dev/null +++ b/gcc/config/rs6000/crtsavfpr.asm @@ -0,0 +1,81 @@ +/* + * Special support for eabi and SVR4 + * + * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008, 2009 + * Free Software Foundation, Inc. + * Written By Michael Meissner + * 64-bit support written by David Edelsohn + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * . + */ + +/* Do any initializations needed for the eabi environment */ + + .section ".text" + #include "ppc-asm.h" + +/* On PowerPC64 Linux, these functions are provided by the linker. */ +#ifndef __powerpc64__ + +/* Routines for saving floating point registers, called by the compiler. 
*/ +/* Called with r11 pointing to the stack header word of the caller of the */ +/* function, just beyond the end of the floating point save area. */ + +CFI_STARTPROC +HIDDEN_FUNC(_savefpr_14) stfd 14,-144(11) /* save fp registers */ +HIDDEN_FUNC(_savefpr_15) stfd 15,-136(11) +HIDDEN_FUNC(_savefpr_16) stfd 16,-128(11) +HIDDEN_FUNC(_savefpr_17) stfd 17,-120(11) +HIDDEN_FUNC(_savefpr_18) stfd 18,-112(11) +HIDDEN_FUNC(_savefpr_19) stfd 19,-104(11) +HIDDEN_FUNC(_savefpr_20) stfd 20,-96(11) +HIDDEN_FUNC(_savefpr_21) stfd 21,-88(11) +HIDDEN_FUNC(_savefpr_22) stfd 22,-80(11) +HIDDEN_FUNC(_savefpr_23) stfd 23,-72(11) +HIDDEN_FUNC(_savefpr_24) stfd 24,-64(11) +HIDDEN_FUNC(_savefpr_25) stfd 25,-56(11) +HIDDEN_FUNC(_savefpr_26) stfd 26,-48(11) +HIDDEN_FUNC(_savefpr_27) stfd 27,-40(11) +HIDDEN_FUNC(_savefpr_28) stfd 28,-32(11) +HIDDEN_FUNC(_savefpr_29) stfd 29,-24(11) +HIDDEN_FUNC(_savefpr_30) stfd 30,-16(11) +HIDDEN_FUNC(_savefpr_31) stfd 31,-8(11) + blr +FUNC_END(_savefpr_31) +FUNC_END(_savefpr_30) +FUNC_END(_savefpr_29) +FUNC_END(_savefpr_28) +FUNC_END(_savefpr_27) +FUNC_END(_savefpr_26) +FUNC_END(_savefpr_25) +FUNC_END(_savefpr_24) +FUNC_END(_savefpr_23) +FUNC_END(_savefpr_22) +FUNC_END(_savefpr_21) +FUNC_END(_savefpr_20) +FUNC_END(_savefpr_19) +FUNC_END(_savefpr_18) +FUNC_END(_savefpr_17) +FUNC_END(_savefpr_16) +FUNC_END(_savefpr_15) +FUNC_END(_savefpr_14) +CFI_ENDPROC + +#endif diff --git a/gcc/config/rs6000/crtsavgpr.asm b/gcc/config/rs6000/crtsavgpr.asm new file mode 100644 index 000000000..6d473963b --- /dev/null +++ b/gcc/config/rs6000/crtsavgpr.asm @@ -0,0 +1,81 @@ +/* + * Special support for eabi and SVR4 + * + * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008, 2009 + * Free Software Foundation, Inc. 
+ * Written By Michael Meissner + * 64-bit support written by David Edelsohn + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * . + */ + +/* Do any initializations needed for the eabi environment */ + + .section ".text" + #include "ppc-asm.h" + +/* On PowerPC64 Linux, these functions are provided by the linker. */ +#ifndef __powerpc64__ + +/* Routines for saving integer registers, called by the compiler. */ +/* Called with r11 pointing to the stack header word of the caller of the */ +/* function, just beyond the end of the integer save area. 
*/ + +CFI_STARTPROC +HIDDEN_FUNC(_savegpr_14) stw 14,-72(11) /* save gp registers */ +HIDDEN_FUNC(_savegpr_15) stw 15,-68(11) +HIDDEN_FUNC(_savegpr_16) stw 16,-64(11) +HIDDEN_FUNC(_savegpr_17) stw 17,-60(11) +HIDDEN_FUNC(_savegpr_18) stw 18,-56(11) +HIDDEN_FUNC(_savegpr_19) stw 19,-52(11) +HIDDEN_FUNC(_savegpr_20) stw 20,-48(11) +HIDDEN_FUNC(_savegpr_21) stw 21,-44(11) +HIDDEN_FUNC(_savegpr_22) stw 22,-40(11) +HIDDEN_FUNC(_savegpr_23) stw 23,-36(11) +HIDDEN_FUNC(_savegpr_24) stw 24,-32(11) +HIDDEN_FUNC(_savegpr_25) stw 25,-28(11) +HIDDEN_FUNC(_savegpr_26) stw 26,-24(11) +HIDDEN_FUNC(_savegpr_27) stw 27,-20(11) +HIDDEN_FUNC(_savegpr_28) stw 28,-16(11) +HIDDEN_FUNC(_savegpr_29) stw 29,-12(11) +HIDDEN_FUNC(_savegpr_30) stw 30,-8(11) +HIDDEN_FUNC(_savegpr_31) stw 31,-4(11) + blr +FUNC_END(_savegpr_31) +FUNC_END(_savegpr_30) +FUNC_END(_savegpr_29) +FUNC_END(_savegpr_28) +FUNC_END(_savegpr_27) +FUNC_END(_savegpr_26) +FUNC_END(_savegpr_25) +FUNC_END(_savegpr_24) +FUNC_END(_savegpr_23) +FUNC_END(_savegpr_22) +FUNC_END(_savegpr_21) +FUNC_END(_savegpr_20) +FUNC_END(_savegpr_19) +FUNC_END(_savegpr_18) +FUNC_END(_savegpr_17) +FUNC_END(_savegpr_16) +FUNC_END(_savegpr_15) +FUNC_END(_savegpr_14) +CFI_ENDPROC + +#endif diff --git a/gcc/config/rs6000/darwin-asm.h b/gcc/config/rs6000/darwin-asm.h new file mode 100644 index 000000000..837b7a33e --- /dev/null +++ b/gcc/config/rs6000/darwin-asm.h @@ -0,0 +1,51 @@ +/* Macro definitions to used to support 32/64-bit code in Darwin's + * assembly files. + * + * Copyright (C) 2004, 2009 Free Software Foundation, Inc. + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. 
+ * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * . + */ + +/* These are donated from /usr/include/architecture/ppc . */ + +#if defined(__ppc64__) +#define MODE_CHOICE(x, y) y +#else +#define MODE_CHOICE(x, y) x +#endif + +#define cmpg MODE_CHOICE(cmpw, cmpd) +#define lg MODE_CHOICE(lwz, ld) +#define stg MODE_CHOICE(stw, std) +#define lgx MODE_CHOICE(lwzx, ldx) +#define stgx MODE_CHOICE(stwx, stdx) +#define lgu MODE_CHOICE(lwzu, ldu) +#define stgu MODE_CHOICE(stwu, stdu) +#define lgux MODE_CHOICE(lwzux, ldux) +#define stgux MODE_CHOICE(stwux, stdux) +#define lgwa MODE_CHOICE(lwz, lwa) + +#define g_long MODE_CHOICE(long, quad) /* usage is ".g_long" */ + +#define GPR_BYTES MODE_CHOICE(4,8) /* size of a GPR in bytes */ +#define LOG2_GPR_BYTES MODE_CHOICE(2,3) /* log2(GPR_BYTES) */ + +#define SAVED_LR_OFFSET MODE_CHOICE(8,16) /* position of saved + LR in frame */ diff --git a/gcc/config/rs6000/darwin-fallback.c b/gcc/config/rs6000/darwin-fallback.c new file mode 100644 index 000000000..4591071ea --- /dev/null +++ b/gcc/config/rs6000/darwin-fallback.c @@ -0,0 +1,487 @@ +/* Fallback frame-state unwinder for Darwin. + Copyright (C) 2004, 2005, 2007, 2009 Free Software Foundation, Inc. + + This file is part of GCC. 
+ + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifdef __ppc__ + +#include "tconfig.h" +#include "tsystem.h" +#include "coretypes.h" +#include "tm.h" +#include "dwarf2.h" +#include "unwind.h" +#include "unwind-dw2.h" +#include +#include +#include +#include + +#define R_LR 65 +#define R_CTR 66 +#define R_CR2 70 +#define R_XER 76 +#define R_VR0 77 +#define R_VRSAVE 109 +#define R_VSCR 110 +#define R_SPEFSCR 112 + +typedef unsigned long reg_unit; + +/* Place in GPRS the parameters to the first 'sc' instruction that would + have been executed if we were returning from this CONTEXT, or + return false if an unexpected instruction is encountered. 
*/ + +static bool +interpret_libc (reg_unit gprs[32], struct _Unwind_Context *context) +{ + uint32_t *pc = (uint32_t *)_Unwind_GetIP (context); + uint32_t cr; + reg_unit lr = (reg_unit) pc; + reg_unit ctr = 0; + uint32_t *invalid_address = NULL; + + int i; + + for (i = 0; i < 13; i++) + gprs[i] = 1; + gprs[1] = _Unwind_GetCFA (context); + for (; i < 32; i++) + gprs[i] = _Unwind_GetGR (context, i); + cr = _Unwind_GetGR (context, R_CR2); + + /* For each supported Libc, we have to track the code flow + all the way back into the kernel. + + This code is believed to support all released Libc/Libsystem builds since + Jaguar 6C115, including all the security updates. To be precise, + + Libc Libsystem Build(s) + 262~1 60~37 6C115 + 262~1 60.2~4 6D52 + 262~1 61~3 6F21-6F22 + 262~1 63~24 6G30-6G37 + 262~1 63~32 6I34-6I35 + 262~1 63~64 6L29-6L60 + 262.4.1~1 63~84 6L123-6R172 + + 320~1 71~101 7B85-7D28 + 320~1 71~266 7F54-7F56 + 320~1 71~288 7F112 + 320~1 71~289 7F113 + 320.1.3~1 71.1.1~29 7H60-7H105 + 320.1.3~1 71.1.1~30 7H110-7H113 + 320.1.3~1 71.1.1~31 7H114 + + That's a big table! It would be insane to try to keep track of + every little detail, so we just read the code itself and do what + it would do. + */ + + for (;;) + { + uint32_t ins = *pc++; + + if ((ins & 0xFC000003) == 0x48000000) /* b instruction */ + { + pc += ((((int32_t) ins & 0x3FFFFFC) ^ 0x2000000) - 0x2000004) / 4; + continue; + } + if ((ins & 0xFC600000) == 0x2C000000) /* cmpwi */ + { + int32_t val1 = (int16_t) ins; + int32_t val2 = gprs[ins >> 16 & 0x1F]; + /* Only beq and bne instructions are supported, so we only + need to set the EQ bit. 
*/ + uint32_t mask = 0xF << ((ins >> 21 & 0x1C) ^ 0x1C); + if (val1 == val2) + cr |= mask; + else + cr &= ~mask; + continue; + } + if ((ins & 0xFEC38003) == 0x40820000) /* forwards beq/bne */ + { + if ((cr >> ((ins >> 16 & 0x1F) ^ 0x1F) & 1) == (ins >> 24 & 1)) + pc += (ins & 0x7FFC) / 4 - 1; + continue; + } + if ((ins & 0xFC0007FF) == 0x7C000378) /* or, including mr */ + { + gprs [ins >> 16 & 0x1F] = (gprs [ins >> 11 & 0x1F] + | gprs [ins >> 21 & 0x1F]); + continue; + } + if (ins >> 26 == 0x0E) /* addi, including li */ + { + reg_unit src = (ins >> 16 & 0x1F) == 0 ? 0 : gprs [ins >> 16 & 0x1F]; + gprs [ins >> 21 & 0x1F] = src + (int16_t) ins; + continue; + } + if (ins >> 26 == 0x0F) /* addis, including lis */ + { + reg_unit src = (ins >> 16 & 0x1F) == 0 ? 0 : gprs [ins >> 16 & 0x1F]; + gprs [ins >> 21 & 0x1F] = src + ((int16_t) ins << 16); + continue; + } + if (ins >> 26 == 0x20) /* lwz */ + { + reg_unit src = (ins >> 16 & 0x1F) == 0 ? 0 : gprs [ins >> 16 & 0x1F]; + uint32_t *p = (uint32_t *)(src + (int16_t) ins); + if (p == invalid_address) + return false; + gprs [ins >> 21 & 0x1F] = *p; + continue; + } + if (ins >> 26 == 0x21) /* lwzu */ + { + uint32_t *p = (uint32_t *)(gprs [ins >> 16 & 0x1F] += (int16_t) ins); + if (p == invalid_address) + return false; + gprs [ins >> 21 & 0x1F] = *p; + continue; + } + if (ins >> 26 == 0x24) /* stw */ + /* What we hope this is doing is '--in_sigtramp'. We don't want + to actually store to memory, so just make a note of the + address and refuse to load from it. */ + { + reg_unit src = (ins >> 16 & 0x1F) == 0 ? 0 : gprs [ins >> 16 & 0x1F]; + uint32_t *p = (uint32_t *)(src + (int16_t) ins); + if (p == NULL || invalid_address != NULL) + return false; + invalid_address = p; + continue; + } + if (ins >> 26 == 0x2E) /* lmw */ + { + reg_unit src = (ins >> 16 & 0x1F) == 0 ? 
0 : gprs [ins >> 16 & 0x1F]; + uint32_t *p = (uint32_t *)(src + (int16_t) ins); + int i; + + for (i = (ins >> 21 & 0x1F); i < 32; i++) + { + if (p == invalid_address) + return false; + gprs[i] = *p++; + } + continue; + } + if ((ins & 0xFC1FFFFF) == 0x7c0803a6) /* mtlr */ + { + lr = gprs [ins >> 21 & 0x1F]; + continue; + } + if ((ins & 0xFC1FFFFF) == 0x7c0802a6) /* mflr */ + { + gprs [ins >> 21 & 0x1F] = lr; + continue; + } + if ((ins & 0xFC1FFFFF) == 0x7c0903a6) /* mtctr */ + { + ctr = gprs [ins >> 21 & 0x1F]; + continue; + } + /* The PowerPC User's Manual says that bit 11 of the mtcrf + instruction is reserved and should be set to zero, but it + looks like the Darwin assembler doesn't do that... */ + if ((ins & 0xFC000FFF) == 0x7c000120) /* mtcrf */ + { + int i; + uint32_t mask = 0; + for (i = 0; i < 8; i++) + mask |= ((-(ins >> (12 + i) & 1)) & 0xF) << 4 * i; + cr = (cr & ~mask) | (gprs [ins >> 21 & 0x1F] & mask); + continue; + } + if (ins == 0x429f0005) /* bcl- 20,4*cr7+so,.+4, loads pc into LR */ + { + lr = (reg_unit) pc; + continue; + } + if (ins == 0x4e800420) /* bctr */ + { + pc = (uint32_t *) ctr; + continue; + } + if (ins == 0x44000002) /* sc */ + return true; + + return false; + } +} + +/* We used to include and , + but they change so much between different Darwin system versions + that it's much easier to just write the structures involved here + directly. */ + +/* These defines are from the kernel's bsd/dev/ppc/unix_signal.c. 
*/ +#define UC_TRAD 1 +#define UC_TRAD_VEC 6 +#define UC_TRAD64 20 +#define UC_TRAD64_VEC 25 +#define UC_FLAVOR 30 +#define UC_FLAVOR_VEC 35 +#define UC_FLAVOR64 40 +#define UC_FLAVOR64_VEC 45 +#define UC_DUAL 50 +#define UC_DUAL_VEC 55 + +struct gcc_ucontext +{ + int onstack; + sigset_t sigmask; + void * stack_sp; + size_t stack_sz; + int stack_flags; + struct gcc_ucontext *link; + size_t mcsize; + struct gcc_mcontext32 *mcontext; +}; + +struct gcc_float_vector_state +{ + double fpregs[32]; + uint32_t fpscr_pad; + uint32_t fpscr; + uint32_t save_vr[32][4]; + uint32_t save_vscr[4]; +}; + +struct gcc_mcontext32 { + uint32_t dar; + uint32_t dsisr; + uint32_t exception; + uint32_t padding1[5]; + uint32_t srr0; + uint32_t srr1; + uint32_t gpr[32]; + uint32_t cr; + uint32_t xer; + uint32_t lr; + uint32_t ctr; + uint32_t mq; + uint32_t vrsave; + struct gcc_float_vector_state fvs; +}; + +/* These are based on /usr/include/ppc/ucontext.h and + /usr/include/mach/ppc/thread_status.h, but rewritten to be more + convenient, to compile on Jaguar, and to work around Radar 3712064 + on Panther, which is that the 'es' field of 'struct mcontext64' has + the wrong type (doh!). */ + +struct gcc_mcontext64 { + uint64_t dar; + uint32_t dsisr; + uint32_t exception; + uint32_t padding1[4]; + uint64_t srr0; + uint64_t srr1; + uint32_t gpr[32][2]; + uint32_t cr; + uint32_t xer[2]; /* These are arrays because the original structure has them misaligned. */ + uint32_t lr[2]; + uint32_t ctr[2]; + uint32_t vrsave; + struct gcc_float_vector_state fvs; +}; + +#define UC_FLAVOR_SIZE \ + (sizeof (struct gcc_mcontext32) - 33*16) + +#define UC_FLAVOR_VEC_SIZE (sizeof (struct gcc_mcontext32)) + +#define UC_FLAVOR64_SIZE \ + (sizeof (struct gcc_mcontext64) - 33*16) + +#define UC_FLAVOR64_VEC_SIZE (sizeof (struct gcc_mcontext64)) + +/* Given GPRS as input to a 'sc' instruction, and OLD_CFA, update FS + to represent the execution of a signal return; or, if not a signal + return, return false. 
*/ + +static bool +handle_syscall (_Unwind_FrameState *fs, const reg_unit gprs[32], + _Unwind_Ptr old_cfa) +{ + struct gcc_ucontext *uctx; + bool is_64, is_vector; + struct gcc_float_vector_state * float_vector_state; + _Unwind_Ptr new_cfa; + int i; + static _Unwind_Ptr return_addr; + + /* Yay! We're in a Libc that we understand, and it's made a + system call. In Jaguar, this is a direct system call with value 103; + in Panther and Tiger it is a SYS_syscall call for system call number 184, + and in Leopard it is a direct syscall with number 184. */ + + if (gprs[0] == 0x67 /* SYS_SIGRETURN */) + { + uctx = (struct gcc_ucontext *) gprs[3]; + is_vector = (uctx->mcsize == UC_FLAVOR64_VEC_SIZE + || uctx->mcsize == UC_FLAVOR_VEC_SIZE); + is_64 = (uctx->mcsize == UC_FLAVOR64_VEC_SIZE + || uctx->mcsize == UC_FLAVOR64_SIZE); + } + else if (gprs[0] == 0 /* SYS_syscall */ && gprs[3] == 184) + { + int ctxstyle = gprs[5]; + uctx = (struct gcc_ucontext *) gprs[4]; + is_vector = (ctxstyle == UC_FLAVOR_VEC || ctxstyle == UC_FLAVOR64_VEC + || ctxstyle == UC_TRAD_VEC || ctxstyle == UC_TRAD64_VEC); + is_64 = (ctxstyle == UC_FLAVOR64_VEC || ctxstyle == UC_TRAD64_VEC + || ctxstyle == UC_FLAVOR64 || ctxstyle == UC_TRAD64); + } + else if (gprs[0] == 184 /* SYS_sigreturn */) + { + int ctxstyle = gprs[4]; + uctx = (struct gcc_ucontext *) gprs[3]; + is_vector = (ctxstyle == UC_FLAVOR_VEC || ctxstyle == UC_FLAVOR64_VEC + || ctxstyle == UC_TRAD_VEC || ctxstyle == UC_TRAD64_VEC); + is_64 = (ctxstyle == UC_FLAVOR64_VEC || ctxstyle == UC_TRAD64_VEC + || ctxstyle == UC_FLAVOR64 || ctxstyle == UC_TRAD64); + } + else + return false; + +#define set_offset(r, addr) \ + (fs->regs.reg[r].how = REG_SAVED_OFFSET, \ + fs->regs.reg[r].loc.offset = (_Unwind_Ptr)(addr) - new_cfa) + + /* Restore even the registers that are not call-saved, since they + might be being used in the prologue to save other registers, + for instance GPR0 is sometimes used to save LR. 
*/ + + /* Handle the GPRs, and produce the information needed to do the rest. */ + if (is_64) + { + /* The context is 64-bit, but it doesn't carry any extra information + for us because only the low 32 bits of the registers are + call-saved. */ + struct gcc_mcontext64 *m64 = (struct gcc_mcontext64 *)uctx->mcontext; + int i; + + float_vector_state = &m64->fvs; + + new_cfa = m64->gpr[1][1]; + + set_offset (R_CR2, &m64->cr); + for (i = 0; i < 32; i++) + set_offset (i, m64->gpr[i] + 1); + set_offset (R_XER, m64->xer + 1); + set_offset (R_LR, m64->lr + 1); + set_offset (R_CTR, m64->ctr + 1); + if (is_vector) + set_offset (R_VRSAVE, &m64->vrsave); + + /* Sometimes, srr0 points to the instruction that caused the exception, + and sometimes to the next instruction to be executed; we want + the latter. */ + if (m64->exception == 3 || m64->exception == 4 + || m64->exception == 6 + || (m64->exception == 7 && !(m64->srr1 & 0x10000))) + return_addr = m64->srr0 + 4; + else + return_addr = m64->srr0; + } + else + { + struct gcc_mcontext32 *m = uctx->mcontext; + int i; + + float_vector_state = &m->fvs; + + new_cfa = m->gpr[1]; + + set_offset (R_CR2, &m->cr); + for (i = 0; i < 32; i++) + set_offset (i, m->gpr + i); + set_offset (R_XER, &m->xer); + set_offset (R_LR, &m->lr); + set_offset (R_CTR, &m->ctr); + + if (is_vector) + set_offset (R_VRSAVE, &m->vrsave); + + /* Sometimes, srr0 points to the instruction that caused the exception, + and sometimes to the next instruction to be executed; we want + the latter. */ + if (m->exception == 3 || m->exception == 4 + || m->exception == 6 + || (m->exception == 7 && !(m->srr1 & 0x10000))) + return_addr = m->srr0 + 4; + else + return_addr = m->srr0; + } + + fs->regs.cfa_how = CFA_REG_OFFSET; + fs->regs.cfa_reg = STACK_POINTER_REGNUM; + fs->regs.cfa_offset = new_cfa - old_cfa;; + + /* The choice of column for the return address is somewhat tricky. 
+ Fortunately, the actual choice is private to this file, and + the space it's reserved from is the GCC register space, not the + DWARF2 numbering. So any free element of the right size is an OK + choice. Thus: */ + fs->retaddr_column = ARG_POINTER_REGNUM; + /* FIXME: this should really be done using a DWARF2 location expression, + not using a static variable. In fact, this entire file should + be implemented in DWARF2 expressions. */ + set_offset (ARG_POINTER_REGNUM, &return_addr); + + for (i = 0; i < 32; i++) + set_offset (32 + i, float_vector_state->fpregs + i); + set_offset (R_SPEFSCR, &float_vector_state->fpscr); + + if (is_vector) + { + for (i = 0; i < 32; i++) + set_offset (R_VR0 + i, float_vector_state->save_vr + i); + set_offset (R_VSCR, float_vector_state->save_vscr); + } + + return true; +} + +/* This is also prototyped in rs6000/darwin.h, inside the + MD_FALLBACK_FRAME_STATE_FOR macro. */ +extern bool _Unwind_fallback_frame_state_for (struct _Unwind_Context *context, + _Unwind_FrameState *fs); + +/* Implement the MD_FALLBACK_FRAME_STATE_FOR macro, + returning true iff the frame was a sigreturn() frame that we + can understand. */ + +bool +_Unwind_fallback_frame_state_for (struct _Unwind_Context *context, + _Unwind_FrameState *fs) +{ + reg_unit gprs[32]; + + if (!interpret_libc (gprs, context)) + return false; + return handle_syscall (fs, gprs, _Unwind_GetCFA (context)); +} +#endif diff --git a/gcc/config/rs6000/darwin-fpsave.asm b/gcc/config/rs6000/darwin-fpsave.asm new file mode 100644 index 000000000..47fdc92f8 --- /dev/null +++ b/gcc/config/rs6000/darwin-fpsave.asm @@ -0,0 +1,92 @@ +/* This file contains the floating-point save and restore routines. + * + * Copyright (C) 2004, 2009 Free Software Foundation, Inc. + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. 
+ * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * . + */ + +/* THE SAVE AND RESTORE ROUTINES CAN HAVE ONLY ONE GLOBALLY VISIBLE + ENTRY POINT - callers have to jump to "saveFP+60" to save f29..f31, + for example. For FP reg saves/restores, it takes one instruction + (4 bytes) to do the operation; for Vector regs, 2 instructions are + required (8 bytes.) + + MORAL: DO NOT MESS AROUND WITH THESE FUNCTIONS! */ + +#include "darwin-asm.h" + +.text + .align 2 + +/* saveFP saves R0 -- assumed to be the callers LR -- to 8/16(R1). */ + +.private_extern saveFP +saveFP: + stfd f14,-144(r1) + stfd f15,-136(r1) + stfd f16,-128(r1) + stfd f17,-120(r1) + stfd f18,-112(r1) + stfd f19,-104(r1) + stfd f20,-96(r1) + stfd f21,-88(r1) + stfd f22,-80(r1) + stfd f23,-72(r1) + stfd f24,-64(r1) + stfd f25,-56(r1) + stfd f26,-48(r1) + stfd f27,-40(r1) + stfd f28,-32(r1) + stfd f29,-24(r1) + stfd f30,-16(r1) + stfd f31,-8(r1) + stg r0,SAVED_LR_OFFSET(r1) + blr + +/* restFP restores the caller`s LR from 8/16(R1). Note that the code for + this starts at the offset of F30 restoration, so calling this + routine in an attempt to restore only F31 WILL NOT WORK (it would + be a stupid thing to do, anyway.) 
*/ + +.private_extern restFP +restFP: + lfd f14,-144(r1) + lfd f15,-136(r1) + lfd f16,-128(r1) + lfd f17,-120(r1) + lfd f18,-112(r1) + lfd f19,-104(r1) + lfd f20,-96(r1) + lfd f21,-88(r1) + lfd f22,-80(r1) + lfd f23,-72(r1) + lfd f24,-64(r1) + lfd f25,-56(r1) + lfd f26,-48(r1) + lfd f27,-40(r1) + lfd f28,-32(r1) + lfd f29,-24(r1) + /* restore callers LR */ + lg r0,SAVED_LR_OFFSET(r1) + lfd f30,-16(r1) + /* and prepare for return to caller */ + mtlr r0 + lfd f31,-8(r1) + blr diff --git a/gcc/config/rs6000/darwin-ldouble-format b/gcc/config/rs6000/darwin-ldouble-format new file mode 100644 index 000000000..3d1a06a14 --- /dev/null +++ b/gcc/config/rs6000/darwin-ldouble-format @@ -0,0 +1,91 @@ +Long double format +================== + + Each long double is made up of two IEEE doubles. The value of the +long double is the sum of the values of the two parts (except for +-0.0). The most significant part is required to be the value of the +long double rounded to the nearest double, as specified by IEEE. For +Inf values, the least significant part is required to be one of +0.0 +or -0.0. No other requirements are made; so, for example, 1.0 may be +represented as (1.0, +0.0) or (1.0, -0.0), and the low part of a NaN +is don't-care. + +Classification +-------------- + +A long double can represent any value of the form + s * 2^e * sum(k=0...105: f_k * 2^(-k)) +where 's' is +1 or -1, 'e' is between 1022 and -968 inclusive, f_0 is +1, and f_k for k>0 is 0 or 1. These are the 'normal' long doubles. + +A long double can also represent any value of the form + s * 2^-968 * sum(k=0...105: f_k * 2^(-k)) +where 's' is +1 or -1, f_0 is 0, and f_k for k>0 is 0 or 1. These are +the 'subnormal' long doubles. + +There are four long doubles that represent zero, two that represent ++0.0 and two that represent -0.0. The sign of the high part is the +sign of the long double, and the sign of the low part is ignored. 
+ +Likewise, there are four long doubles that represent infinities, two +for +Inf and two for -Inf. + +Each NaN, quiet or signalling, that can be represented as a 'double' +can be represented as a 'long double'. In fact, there are 2^64 +equivalent representations for each one. + +There are certain other valid long doubles where both parts are +nonzero but the low part represents a value which has a bit set below +2^(e-105). These, together with the subnormal long doubles, make up +the denormal long doubles. + +Many possible long double bit patterns are not valid long doubles. +These do not represent any value. + +Limits +------ + +The maximum representable long double is 2^1024-2^918. The smallest +*normal* positive long double is 2^-968. The smallest denormalised +positive long double is 2^-1074 (this is the same as for 'double'). + +Conversions +----------- + +A double can be converted to a long double by adding a zero low part. + +A long double can be converted to a double by removing the low part. + +Comparisons +----------- + +Two long doubles can be compared by comparing the high parts, and if +those compare equal, comparing the low parts. + +Arithmetic +---------- + +The unary negate operation operates by negating the low and high parts. + +An absolute or absolute-negate operation must be done by comparing +against zero and negating if necessary. + +Addition and subtraction are performed using library routines. They +are not at present performed perfectly accurately, the result produced +will be within 1ulp of the range generated by adding or subtracting +1ulp from the input values, where a 'ulp' is 2^(e-106) given the +exponent 'e'. In the presence of cancellation, this may be +arbitrarily inaccurate. Subtraction is done by negation and addition. + +Multiplication is also performed using a library routine. Its result +will be within 2ulp of the correct result. + +Division is also performed using a library routine. 
Its result will +be within 3ulp of the correct result. + + +Copyright (C) 2004 Free Software Foundation, Inc. + +Copying and distribution of this file, with or without modification, +are permitted in any medium without royalty provided the copyright +notice and this notice are preserved. diff --git a/gcc/config/rs6000/darwin-ldouble.c b/gcc/config/rs6000/darwin-ldouble.c new file mode 100644 index 000000000..d76c1b184 --- /dev/null +++ b/gcc/config/rs6000/darwin-ldouble.c @@ -0,0 +1,438 @@ +/* 128-bit long double support routines for Darwin. + Copyright (C) 1993, 2003, 2004, 2005, 2006, 2007, 2008, 2009 + Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + + +/* Implementations of floating-point long double basic arithmetic + functions called by the IBM C compiler when generating code for + PowerPC platforms. In particular, the following functions are + implemented: __gcc_qadd, __gcc_qsub, __gcc_qmul, and __gcc_qdiv. + Double-double algorithms are based on the paper "Doubled-Precision + IEEE Standard 754 Floating-Point Arithmetic" by W. Kahan, February 26, + 1987. 
An alternative published reference is "Software for + Doubled-Precision Floating-Point Computations", by Seppo Linnainmaa, + ACM TOMS vol 7 no 3, September 1981, pages 272-283. */ + +/* Each long double is made up of two IEEE doubles. The value of the + long double is the sum of the values of the two parts. The most + significant part is required to be the value of the long double + rounded to the nearest double, as specified by IEEE. For Inf + values, the least significant part is required to be one of +0.0 or + -0.0. No other requirements are made; so, for example, 1.0 may be + represented as (1.0, +0.0) or (1.0, -0.0), and the low part of a + NaN is don't-care. + + This code currently assumes big-endian. */ + +#if (!defined (__LITTLE_ENDIAN__) \ + && (defined (__MACH__) || defined (__powerpc__) || defined (_AIX))) + +#define fabs(x) __builtin_fabs(x) +#define isless(x, y) __builtin_isless (x, y) +#define inf() __builtin_inf() + +#define unlikely(x) __builtin_expect ((x), 0) + +#define nonfinite(a) unlikely (! isless (fabs (a), inf ())) + +/* Define ALIASNAME as a strong alias for NAME. */ +# define strong_alias(name, aliasname) _strong_alias(name, aliasname) +# define _strong_alias(name, aliasname) \ + extern __typeof (name) aliasname __attribute__ ((alias (#name))); + +/* All these routines actually take two long doubles as parameters, + but GCC currently generates poor code when a union is used to turn + a long double into a pair of doubles. */ + +long double __gcc_qadd (double, double, double, double); +long double __gcc_qsub (double, double, double, double); +long double __gcc_qmul (double, double, double, double); +long double __gcc_qdiv (double, double, double, double); + +#if defined __ELF__ && defined SHARED \ + && (defined __powerpc64__ || !(defined __linux__ || defined __gnu_hurd__)) +/* Provide definitions of the old symbol names to satisfy apps and + shared libs built against an older libgcc. 
To access the _xlq + symbols an explicit version reference is needed, so these won't + satisfy an unadorned reference like _xlqadd. If dot symbols are + not needed, the assembler will remove the aliases from the symbol + table. */ +__asm__ (".symver __gcc_qadd,_xlqadd@GCC_3.4\n\t" + ".symver __gcc_qsub,_xlqsub@GCC_3.4\n\t" + ".symver __gcc_qmul,_xlqmul@GCC_3.4\n\t" + ".symver __gcc_qdiv,_xlqdiv@GCC_3.4\n\t" + ".symver .__gcc_qadd,._xlqadd@GCC_3.4\n\t" + ".symver .__gcc_qsub,._xlqsub@GCC_3.4\n\t" + ".symver .__gcc_qmul,._xlqmul@GCC_3.4\n\t" + ".symver .__gcc_qdiv,._xlqdiv@GCC_3.4"); +#endif + +typedef union +{ + long double ldval; + double dval[2]; +} longDblUnion; + +/* Add two 'long double' values and return the result. */ +long double +__gcc_qadd (double a, double aa, double c, double cc) +{ + longDblUnion x; + double z, q, zz, xh; + + z = a + c; + + if (nonfinite (z)) + { + z = cc + aa + c + a; + if (nonfinite (z)) + return z; + x.dval[0] = z; /* Will always be DBL_MAX. */ + zz = aa + cc; + if (fabs(a) > fabs(c)) + x.dval[1] = a - z + c + zz; + else + x.dval[1] = c - z + a + zz; + } + else + { + q = a - z; + zz = q + c + (a - (q + z)) + aa + cc; + + /* Keep -0 result. */ + if (zz == 0.0) + return z; + + xh = z + zz; + if (nonfinite (xh)) + return xh; + + x.dval[0] = xh; + x.dval[1] = z - xh + zz; + } + return x.ldval; +} + +long double +__gcc_qsub (double a, double b, double c, double d) +{ + return __gcc_qadd (a, b, -c, -d); +} + +#ifdef __NO_FPRS__ +static double fmsub (double, double, double); +#endif + +long double +__gcc_qmul (double a, double b, double c, double d) +{ + longDblUnion z; + double t, tau, u, v, w; + + t = a * c; /* Highest order double term. */ + + if (unlikely (t == 0) /* Preserve -0. */ + || nonfinite (t)) + return t; + + /* Sum terms of two highest orders. */ + + /* Use fused multiply-add to get low part of a * c. 
*/ +#ifndef __NO_FPRS__ + asm ("fmsub %0,%1,%2,%3" : "=f"(tau) : "f"(a), "f"(c), "f"(t)); +#else + tau = fmsub (a, c, t); +#endif + v = a*d; + w = b*c; + tau += v + w; /* Add in other second-order terms. */ + u = t + tau; + + /* Construct long double result. */ + if (nonfinite (u)) + return u; + z.dval[0] = u; + z.dval[1] = (t - u) + tau; + return z.ldval; +} + +long double +__gcc_qdiv (double a, double b, double c, double d) +{ + longDblUnion z; + double s, sigma, t, tau, u, v, w; + + t = a / c; /* highest order double term */ + + if (unlikely (t == 0) /* Preserve -0. */ + || nonfinite (t)) + return t; + + /* Finite nonzero result requires corrections to the highest order term. */ + + s = c * t; /* (s,sigma) = c*t exactly. */ + w = -(-b + d * t); /* Written to get fnmsub for speed, but not + numerically necessary. */ + + /* Use fused multiply-add to get low part of c * t. */ +#ifndef __NO_FPRS__ + asm ("fmsub %0,%1,%2,%3" : "=f"(sigma) : "f"(c), "f"(t), "f"(s)); +#else + sigma = fmsub (c, t, s); +#endif + v = a - s; + + tau = ((v-sigma)+w)/c; /* Correction to t. */ + u = t + tau; + + /* Construct long double result. 
*/ + if (nonfinite (u)) + return u; + z.dval[0] = u; + z.dval[1] = (t - u) + tau; + return z.ldval; +} + +#if defined (_SOFT_DOUBLE) && defined (__LONG_DOUBLE_128__) + +long double __gcc_qneg (double, double); +int __gcc_qeq (double, double, double, double); +int __gcc_qne (double, double, double, double); +int __gcc_qge (double, double, double, double); +int __gcc_qle (double, double, double, double); +long double __gcc_stoq (float); +long double __gcc_dtoq (double); +float __gcc_qtos (double, double); +double __gcc_qtod (double, double); +int __gcc_qtoi (double, double); +unsigned int __gcc_qtou (double, double); +long double __gcc_itoq (int); +long double __gcc_utoq (unsigned int); + +extern int __eqdf2 (double, double); +extern int __ledf2 (double, double); +extern int __gedf2 (double, double); + +/* Negate 'long double' value and return the result. */ +long double +__gcc_qneg (double a, double aa) +{ + longDblUnion x; + + x.dval[0] = -a; + x.dval[1] = -aa; + return x.ldval; +} + +/* Compare two 'long double' values for equality. */ +int +__gcc_qeq (double a, double aa, double c, double cc) +{ + if (__eqdf2 (a, c) == 0) + return __eqdf2 (aa, cc); + return 1; +} + +strong_alias (__gcc_qeq, __gcc_qne); + +/* Compare two 'long double' values for less than or equal. */ +int +__gcc_qle (double a, double aa, double c, double cc) +{ + if (__eqdf2 (a, c) == 0) + return __ledf2 (aa, cc); + return __ledf2 (a, c); +} + +strong_alias (__gcc_qle, __gcc_qlt); + +/* Compare two 'long double' values for greater than or equal. */ +int +__gcc_qge (double a, double aa, double c, double cc) +{ + if (__eqdf2 (a, c) == 0) + return __gedf2 (aa, cc); + return __gedf2 (a, c); +} + +strong_alias (__gcc_qge, __gcc_qgt); + +/* Convert single to long double. */ +long double +__gcc_stoq (float a) +{ + longDblUnion x; + + x.dval[0] = (double) a; + x.dval[1] = 0.0; + + return x.ldval; +} + +/* Convert double to long double. 
*/ +long double +__gcc_dtoq (double a) +{ + longDblUnion x; + + x.dval[0] = a; + x.dval[1] = 0.0; + + return x.ldval; +} + +/* Convert long double to single. */ +float +__gcc_qtos (double a, double aa __attribute__ ((__unused__))) +{ + return (float) a; +} + +/* Convert long double to double. */ +double +__gcc_qtod (double a, double aa __attribute__ ((__unused__))) +{ + return a; +} + +/* Convert long double to int. */ +int +__gcc_qtoi (double a, double aa) +{ + double z = a + aa; + return (int) z; +} + +/* Convert long double to unsigned int. */ +unsigned int +__gcc_qtou (double a, double aa) +{ + double z = a + aa; + return (unsigned int) z; +} + +/* Convert int to long double. */ +long double +__gcc_itoq (int a) +{ + return __gcc_dtoq ((double) a); +} + +/* Convert unsigned int to long double. */ +long double +__gcc_utoq (unsigned int a) +{ + return __gcc_dtoq ((double) a); +} + +#endif + +#ifdef __NO_FPRS__ + +int __gcc_qunord (double, double, double, double); + +extern int __eqdf2 (double, double); +extern int __unorddf2 (double, double); + +/* Compare two 'long double' values for unordered. */ +int +__gcc_qunord (double a, double aa, double c, double cc) +{ + if (__eqdf2 (a, c) == 0) + return __unorddf2 (aa, cc); + return __unorddf2 (a, c); +} + +#include "config/soft-fp/soft-fp.h" +#include "config/soft-fp/double.h" +#include "config/soft-fp/quad.h" + +/* Compute floating point multiply-subtract with higher (quad) precision. */ +static double +fmsub (double a, double b, double c) +{ + FP_DECL_EX; + FP_DECL_D(A); + FP_DECL_D(B); + FP_DECL_D(C); + FP_DECL_Q(X); + FP_DECL_Q(Y); + FP_DECL_Q(Z); + FP_DECL_Q(U); + FP_DECL_Q(V); + FP_DECL_D(R); + double r; + long double u, x, y, z; + + FP_INIT_ROUNDMODE; + FP_UNPACK_RAW_D (A, a); + FP_UNPACK_RAW_D (B, b); + FP_UNPACK_RAW_D (C, c); + + /* Extend double to quad. 
*/ +#if (2 * _FP_W_TYPE_SIZE) < _FP_FRACBITS_Q + FP_EXTEND(Q,D,4,2,X,A); + FP_EXTEND(Q,D,4,2,Y,B); + FP_EXTEND(Q,D,4,2,Z,C); +#else + FP_EXTEND(Q,D,2,1,X,A); + FP_EXTEND(Q,D,2,1,Y,B); + FP_EXTEND(Q,D,2,1,Z,C); +#endif + FP_PACK_RAW_Q(x,X); + FP_PACK_RAW_Q(y,Y); + FP_PACK_RAW_Q(z,Z); + FP_HANDLE_EXCEPTIONS; + + /* Multiply. */ + FP_INIT_ROUNDMODE; + FP_UNPACK_Q(X,x); + FP_UNPACK_Q(Y,y); + FP_MUL_Q(U,X,Y); + FP_PACK_Q(u,U); + FP_HANDLE_EXCEPTIONS; + + /* Subtract. */ + FP_INIT_ROUNDMODE; + FP_UNPACK_SEMIRAW_Q(U,u); + FP_UNPACK_SEMIRAW_Q(Z,z); + FP_SUB_Q(V,U,Z); + + /* Truncate quad to double. */ +#if (2 * _FP_W_TYPE_SIZE) < _FP_FRACBITS_Q + V_f[3] &= 0x0007ffff; + FP_TRUNC(D,Q,2,4,R,V); +#else + V_f1 &= 0x0007ffffffffffffL; + FP_TRUNC(D,Q,1,2,R,V); +#endif + FP_PACK_SEMIRAW_D(r,R); + FP_HANDLE_EXCEPTIONS; + + return r; +} + +#endif + +#endif diff --git a/gcc/config/rs6000/darwin-libgcc.10.4.ver b/gcc/config/rs6000/darwin-libgcc.10.4.ver new file mode 100644 index 000000000..0c6f7c231 --- /dev/null +++ b/gcc/config/rs6000/darwin-libgcc.10.4.ver @@ -0,0 +1,93 @@ +# Copyright (C) 2005 Free Software Foundation, Inc. +# +# This file is part of GCC. +# +# GCC is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. +# +# GCC is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GCC; see the file COPYING3. If not see +# . 
+__Unwind_Backtrace +__Unwind_DeleteException +__Unwind_FindEnclosingFunction +__Unwind_Find_FDE +__Unwind_ForcedUnwind +__Unwind_GetCFA +__Unwind_GetDataRelBase +__Unwind_GetGR +__Unwind_GetIP +__Unwind_GetLanguageSpecificData +__Unwind_GetRegionStart +__Unwind_GetTextRelBase +__Unwind_RaiseException +__Unwind_Resume +__Unwind_Resume_or_Rethrow +__Unwind_SetGR +__Unwind_SetIP +___absvdi2 +___absvsi2 +___addvdi3 +___addvsi3 +___ashldi3 +___ashrdi3 +___clear_cache +___clzdi2 +___clzsi2 +___cmpdi2 +___ctzdi2 +___ctzsi2 +___deregister_frame +___deregister_frame_info +___deregister_frame_info_bases +___divdi3 +___enable_execute_stack +___ffsdi2 +___fixdfdi +___fixsfdi +___fixtfdi +___fixunsdfdi +___fixunsdfsi +___fixunssfdi +___fixunssfsi +___fixunstfdi +___floatdidf +___floatdisf +___floatditf +___gcc_personality_v0 +___gcc_qadd +___gcc_qdiv +___gcc_qmul +___gcc_qsub +___lshrdi3 +___moddi3 +___muldi3 +___mulvdi3 +___mulvsi3 +___negdi2 +___negvdi2 +___negvsi2 +___paritydi2 +___paritysi2 +___popcountdi2 +___popcountsi2 +___register_frame +___register_frame_info +___register_frame_info_bases +___register_frame_info_table +___register_frame_info_table_bases +___register_frame_table +___subvdi3 +___subvsi3 +___trampoline_setup +___ucmpdi2 +___udivdi3 +___udivmoddi4 +___umoddi3 diff --git a/gcc/config/rs6000/darwin-libgcc.10.5.ver b/gcc/config/rs6000/darwin-libgcc.10.5.ver new file mode 100644 index 000000000..c2f08924f --- /dev/null +++ b/gcc/config/rs6000/darwin-libgcc.10.5.ver @@ -0,0 +1,106 @@ +# Copyright (C) 2005, 2006 Free Software Foundation, Inc. +# +# This file is part of GCC. +# +# GCC is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. 
+# +# GCC is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GCC; see the file COPYING3. If not see +# . +__Unwind_Backtrace +__Unwind_DeleteException +__Unwind_FindEnclosingFunction +__Unwind_Find_FDE +__Unwind_ForcedUnwind +__Unwind_GetCFA +__Unwind_GetDataRelBase +__Unwind_GetGR +__Unwind_GetIP +__Unwind_GetIPInfo +__Unwind_GetLanguageSpecificData +__Unwind_GetRegionStart +__Unwind_GetTextRelBase +__Unwind_RaiseException +__Unwind_Resume +__Unwind_Resume_or_Rethrow +__Unwind_SetGR +__Unwind_SetIP +___absvdi2 +___absvsi2 +___addvdi3 +___addvsi3 +___ashldi3 +___ashrdi3 +___clear_cache +___clzdi2 +___clzsi2 +___cmpdi2 +___ctzdi2 +___ctzsi2 +___deregister_frame +___deregister_frame_info +___deregister_frame_info_bases +___divdc3 +___divdi3 +___divsc3 +___divtc3 +___enable_execute_stack +___ffsdi2 +___fixdfdi +___fixsfdi +___fixtfdi +___fixunsdfdi +___fixunsdfsi +___fixunssfdi +___fixunssfsi +___fixunstfdi +___floatdidf +___floatdisf +___floatditf +___floatundidf +___floatundisf +___floatunditf +___gcc_personality_v0 +___gcc_qadd +___gcc_qdiv +___gcc_qmul +___gcc_qsub +___lshrdi3 +___moddi3 +___muldc3 +___muldi3 +___mulsc3 +___multc3 +___mulvdi3 +___mulvsi3 +___negdi2 +___negvdi2 +___negvsi2 +___paritydi2 +___paritysi2 +___popcountdi2 +___popcountsi2 +___powidf2 +___powisf2 +___powitf2 +___register_frame +___register_frame_info +___register_frame_info_bases +___register_frame_info_table +___register_frame_info_table_bases +___register_frame_table +___subvdi3 +___subvsi3 +___trampoline_setup +___ucmpdi2 +___udivdi3 +___udivmoddi4 +___umoddi3 diff --git a/gcc/config/rs6000/darwin-tramp.asm b/gcc/config/rs6000/darwin-tramp.asm new file mode 100644 index 000000000..5188c98ef --- /dev/null +++ 
b/gcc/config/rs6000/darwin-tramp.asm @@ -0,0 +1,125 @@ +/* Special support for trampolines + * + * Copyright (C) 1996, 1997, 2000, 2004, 2005, 2009 Free Software Foundation, Inc. + * Written By Michael Meissner + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * . + */ + +#include "darwin-asm.h" + +/* Set up trampolines. 
*/ + +.text + .align LOG2_GPR_BYTES +Ltrampoline_initial: + mflr r0 + bl 1f +Lfunc = .-Ltrampoline_initial + .g_long 0 /* will be replaced with function address */ +Lchain = .-Ltrampoline_initial + .g_long 0 /* will be replaced with static chain */ +1: mflr r11 + lg r12,0(r11) /* function address */ + mtlr r0 + mtctr r12 + lg r11,GPR_BYTES(r11) /* static chain */ + bctr + +trampoline_size = .-Ltrampoline_initial + +/* R3 = stack address to store trampoline */ +/* R4 = length of trampoline area */ +/* R5 = function address */ +/* R6 = static chain */ + + .globl ___trampoline_setup +___trampoline_setup: + mflr r0 /* save return address */ + bcl 20,31,LCF0 /* load up __trampoline_initial into r7 */ +LCF0: + mflr r11 + addis r7,r11,ha16(LTRAMP-LCF0) + lg r7,lo16(LTRAMP-LCF0)(r7) + subi r7,r7,4 + li r8,trampoline_size /* verify trampoline big enough */ + cmpg cr1,r8,r4 + srwi r4,r4,2 /* # words to move (insns always 4-byte) */ + addi r9,r3,-4 /* adjust pointer for lgu */ + mtctr r4 + blt cr1,Labort + + mtlr r0 + + /* Copy the instructions to the stack */ +Lmove: + lwzu r10,4(r7) + stwu r10,4(r9) + bdnz Lmove + + /* Store correct function and static chain */ + stg r5,Lfunc(r3) + stg r6,Lchain(r3) + + /* Now flush both caches */ + mtctr r4 +Lcache: + icbi 0,r3 + dcbf 0,r3 + addi r3,r3,4 + bdnz Lcache + + /* Ensure cache-flushing has finished. */ + sync + isync + + /* Make stack writeable. 
*/ + b ___enable_execute_stack + +Labort: +#ifdef __DYNAMIC__ + bl L_abort$stub +.data +.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32 + .align 2 +L_abort$stub: + .indirect_symbol _abort + mflr r0 + bcl 20,31,L0$_abort +L0$_abort: + mflr r11 + addis r11,r11,ha16(L_abort$lazy_ptr-L0$_abort) + mtlr r0 + lgu r12,lo16(L_abort$lazy_ptr-L0$_abort)(r11) + mtctr r12 + bctr +.data +.lazy_symbol_pointer +L_abort$lazy_ptr: + .indirect_symbol _abort + .g_long dyld_stub_binding_helper +#else + bl _abort +#endif +.data + .align LOG2_GPR_BYTES +LTRAMP: + .g_long Ltrampoline_initial + diff --git a/gcc/config/rs6000/darwin-unwind.h b/gcc/config/rs6000/darwin-unwind.h new file mode 100644 index 000000000..9fdc115be --- /dev/null +++ b/gcc/config/rs6000/darwin-unwind.h @@ -0,0 +1,30 @@ +/* DWARF2 EH unwinding support for Darwin. + Copyright (C) 2004, 2009 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . 
*/ + +extern bool _Unwind_fallback_frame_state_for + (struct _Unwind_Context *context, _Unwind_FrameState *fs); + +#define MD_FALLBACK_FRAME_STATE_FOR(CONTEXT, FS) \ + (_Unwind_fallback_frame_state_for (CONTEXT, FS) \ + ? _URC_NO_REASON : _URC_END_OF_STACK) diff --git a/gcc/config/rs6000/darwin-vecsave.asm b/gcc/config/rs6000/darwin-vecsave.asm new file mode 100644 index 000000000..0a46be20c --- /dev/null +++ b/gcc/config/rs6000/darwin-vecsave.asm @@ -0,0 +1,155 @@ +/* This file contains the vector save and restore routines. + * + * Copyright (C) 2004, 2009 Free Software Foundation, Inc. + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * . + */ + +/* Vector save/restore routines for Darwin. Note that each vector + save/restore requires 2 instructions (8 bytes.) + + THE SAVE AND RESTORE ROUTINES CAN HAVE ONLY ONE GLOBALLY VISIBLE + ENTRY POINT - callers have to jump to "saveFP+60" to save f29..f31, + for example. For FP reg saves/restores, it takes one instruction + (4 bytes) to do the operation; for Vector regs, 2 instructions are + required (8 bytes.). 
*/ + + .machine ppc7400 +.text + .align 2 + +.private_extern saveVEC +saveVEC: + li r11,-192 + stvx v20,r11,r0 + li r11,-176 + stvx v21,r11,r0 + li r11,-160 + stvx v22,r11,r0 + li r11,-144 + stvx v23,r11,r0 + li r11,-128 + stvx v24,r11,r0 + li r11,-112 + stvx v25,r11,r0 + li r11,-96 + stvx v26,r11,r0 + li r11,-80 + stvx v27,r11,r0 + li r11,-64 + stvx v28,r11,r0 + li r11,-48 + stvx v29,r11,r0 + li r11,-32 + stvx v30,r11,r0 + li r11,-16 + stvx v31,r11,r0 + blr + +.private_extern restVEC +restVEC: + li r11,-192 + lvx v20,r11,r0 + li r11,-176 + lvx v21,r11,r0 + li r11,-160 + lvx v22,r11,r0 + li r11,-144 + lvx v23,r11,r0 + li r11,-128 + lvx v24,r11,r0 + li r11,-112 + lvx v25,r11,r0 + li r11,-96 + lvx v26,r11,r0 + li r11,-80 + lvx v27,r11,r0 + li r11,-64 + lvx v28,r11,r0 + li r11,-48 + lvx v29,r11,r0 + li r11,-32 + lvx v30,r11,r0 + li r11,-16 + lvx v31,r11,r0 + blr + +/* saveVEC_vr11 -- as saveVEC but VRsave is returned in R11. */ + +.private_extern saveVEC_vr11 +saveVEC_vr11: + li r11,-192 + stvx v20,r11,r0 + li r11,-176 + stvx v21,r11,r0 + li r11,-160 + stvx v22,r11,r0 + li r11,-144 + stvx v23,r11,r0 + li r11,-128 + stvx v24,r11,r0 + li r11,-112 + stvx v25,r11,r0 + li r11,-96 + stvx v26,r11,r0 + li r11,-80 + stvx v27,r11,r0 + li r11,-64 + stvx v28,r11,r0 + li r11,-48 + stvx v29,r11,r0 + li r11,-32 + stvx v30,r11,r0 + li r11,-16 + stvx v31,r11,r0 + mfspr r11,VRsave + blr + +/* As restVec, but the original VRsave value passed in R10. */ + +.private_extern restVEC_vr10 +restVEC_vr10: + li r11,-192 + lvx v20,r11,r0 + li r11,-176 + lvx v21,r11,r0 + li r11,-160 + lvx v22,r11,r0 + li r11,-144 + lvx v23,r11,r0 + li r11,-128 + lvx v24,r11,r0 + li r11,-112 + lvx v25,r11,r0 + li r11,-96 + lvx v26,r11,r0 + li r11,-80 + lvx v27,r11,r0 + li r11,-64 + lvx v28,r11,r0 + li r11,-48 + lvx v29,r11,r0 + li r11,-32 + lvx v30,r11,r0 + li r11,-16 + lvx v31,r11,r0 + /* restore VRsave from R10. 
*/ + mtspr VRsave,r10 + blr diff --git a/gcc/config/rs6000/darwin-world.asm b/gcc/config/rs6000/darwin-world.asm new file mode 100644 index 000000000..c0b1bf1a2 --- /dev/null +++ b/gcc/config/rs6000/darwin-world.asm @@ -0,0 +1,259 @@ +/* This file contains the exception-handling save_world and + * restore_world routines, which need to do a run-time check to see if + * they should save and restore the vector registers. + * + * Copyright (C) 2004, 2009 Free Software Foundation, Inc. + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * . + */ + + .machine ppc7400 +.data + .align 2 + +#ifdef __DYNAMIC__ + +.non_lazy_symbol_pointer +L_has_vec$non_lazy_ptr: + .indirect_symbol __cpu_has_altivec +#ifdef __ppc64__ + .quad 0 +#else + .long 0 +#endif + +#else + +/* For static, "pretend" we have a non-lazy-pointer. */ + +L_has_vec$non_lazy_ptr: + .long __cpu_has_altivec + +#endif + + +.text + .align 2 + +/* save_world and rest_world save/restore F14-F31 and possibly V20-V31 + (assuming you have a CPU with vector registers; we use a global var + provided by the System Framework to determine this.) 
+ + SAVE_WORLD takes R0 (the caller`s caller`s return address) and R11 + (the stack frame size) as parameters. It returns VRsave in R0 if + we`re on a CPU with vector regs. + + With gcc3, we now need to save and restore CR as well, since gcc3's + scheduled prologs can cause comparisons to be moved before calls to + save_world! + + USES: R0 R11 R12 */ + +.private_extern save_world +save_world: + stw r0,8(r1) + mflr r0 + bcl 20,31,Ls$pb +Ls$pb: mflr r12 + addis r12,r12,ha16(L_has_vec$non_lazy_ptr-Ls$pb) + lwz r12,lo16(L_has_vec$non_lazy_ptr-Ls$pb)(r12) + mtlr r0 + lwz r12,0(r12) + /* grab CR */ + mfcr r0 + /* test HAS_VEC */ + cmpwi r12,0 + stfd f14,-144(r1) + stfd f15,-136(r1) + stfd f16,-128(r1) + stfd f17,-120(r1) + stfd f18,-112(r1) + stfd f19,-104(r1) + stfd f20,-96(r1) + stfd f21,-88(r1) + stfd f22,-80(r1) + stfd f23,-72(r1) + stfd f24,-64(r1) + stfd f25,-56(r1) + stfd f26,-48(r1) + stfd f27,-40(r1) + stfd f28,-32(r1) + stfd f29,-24(r1) + stfd f30,-16(r1) + stfd f31,-8(r1) + stmw r13,-220(r1) + /* stash CR */ + stw r0,4(r1) + /* set R12 pointing at Vector Reg save area */ + addi r12,r1,-224 + /* allocate stack frame */ + stwux r1,r1,r11 + /* ...but return if HAS_VEC is zero */ + bne+ L$saveVMX + /* Not forgetting to restore CR. */ + mtcr r0 + blr + +L$saveVMX: + /* We're saving Vector regs too. */ + /* Restore CR from R0. No More Branches! */ + mtcr r0 + + /* We should really use VRSAVE to figure out which vector regs + we actually need to save and restore. 
Some other time :-/ */ + + li r11,-192 + stvx v20,r11,r12 + li r11,-176 + stvx v21,r11,r12 + li r11,-160 + stvx v22,r11,r12 + li r11,-144 + stvx v23,r11,r12 + li r11,-128 + stvx v24,r11,r12 + li r11,-112 + stvx v25,r11,r12 + li r11,-96 + stvx v26,r11,r12 + li r11,-80 + stvx v27,r11,r12 + li r11,-64 + stvx v28,r11,r12 + li r11,-48 + stvx v29,r11,r12 + li r11,-32 + stvx v30,r11,r12 + mfspr r0,VRsave + li r11,-16 + stvx v31,r11,r12 + /* VRsave lives at -224(R1) */ + stw r0,0(r12) + blr + + +/* eh_rest_world_r10 is jumped to, not called, so no need to worry about LR. + R10 is the C++ EH stack adjust parameter, we return to the caller`s caller. + + USES: R0 R10 R11 R12 and R7 R8 + RETURNS: C++ EH Data registers (R3 - R6.) + + We now set up R7/R8 and jump to rest_world_eh_r7r8. + + rest_world doesn't use the R10 stack adjust parameter, nor does it + pick up the R3-R6 exception handling stuff. */ + +.private_extern rest_world +rest_world: + /* Pickup previous SP */ + lwz r11, 0(r1) + li r7, 0 + lwz r8, 8(r11) + li r10, 0 + b rest_world_eh_r7r8 + +.private_extern eh_rest_world_r10 +eh_rest_world_r10: + /* Pickup previous SP */ + lwz r11, 0(r1) + mr r7,r10 + lwz r8, 8(r11) + /* pickup the C++ EH data regs (R3 - R6.) */ + lwz r6,-420(r11) + lwz r5,-424(r11) + lwz r4,-428(r11) + lwz r3,-432(r11) + + b rest_world_eh_r7r8 + +/* rest_world_eh_r7r8 is jumped to -- not called! -- when we're doing + the exception-handling epilog. R7 contains the offset to add to + the SP, and R8 contains the 'real' return address. + + USES: R0 R11 R12 [R7/R8] + RETURNS: C++ EH Data registers (R3 - R6.) 
*/ + +rest_world_eh_r7r8: + bcl 20,31,Lr7r8$pb +Lr7r8$pb: mflr r12 + lwz r11,0(r1) + /* R11 := previous SP */ + addis r12,r12,ha16(L_has_vec$non_lazy_ptr-Lr7r8$pb) + lwz r12,lo16(L_has_vec$non_lazy_ptr-Lr7r8$pb)(r12) + lwz r0,4(r11) + /* R0 := old CR */ + lwz r12,0(r12) + /* R12 := HAS_VEC */ + mtcr r0 + cmpwi r12,0 + lmw r13,-220(r11) + beq L.rest_world_fp_eh + /* restore VRsave and V20..V31 */ + lwz r0,-224(r11) + li r12,-416 + mtspr VRsave,r0 + lvx v20,r11,r12 + li r12,-400 + lvx v21,r11,r12 + li r12,-384 + lvx v22,r11,r12 + li r12,-368 + lvx v23,r11,r12 + li r12,-352 + lvx v24,r11,r12 + li r12,-336 + lvx v25,r11,r12 + li r12,-320 + lvx v26,r11,r12 + li r12,-304 + lvx v27,r11,r12 + li r12,-288 + lvx v28,r11,r12 + li r12,-272 + lvx v29,r11,r12 + li r12,-256 + lvx v30,r11,r12 + li r12,-240 + lvx v31,r11,r12 + +L.rest_world_fp_eh: + lfd f14,-144(r11) + lfd f15,-136(r11) + lfd f16,-128(r11) + lfd f17,-120(r11) + lfd f18,-112(r11) + lfd f19,-104(r11) + lfd f20,-96(r11) + lfd f21,-88(r11) + lfd f22,-80(r11) + lfd f23,-72(r11) + lfd f24,-64(r11) + lfd f25,-56(r11) + lfd f26,-48(r11) + lfd f27,-40(r11) + lfd f28,-32(r11) + lfd f29,-24(r11) + lfd f30,-16(r11) + /* R8 is the exception-handler's address */ + mtctr r8 + lfd f31,-8(r11) + /* set SP to original value + R7 offset */ + add r1,r11,r7 + bctr diff --git a/gcc/config/rs6000/darwin.h b/gcc/config/rs6000/darwin.h new file mode 100644 index 000000000..244da0065 --- /dev/null +++ b/gcc/config/rs6000/darwin.h @@ -0,0 +1,438 @@ +/* Target definitions for PowerPC running Darwin (Mac OS X). + Copyright (C) 1997, 2000, 2001, 2003, 2004, 2005, 2006, 2007, 2008, 2010 + Free Software Foundation, Inc. + Contributed by Apple Computer Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. 
+ + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +#undef DARWIN_PPC +#define DARWIN_PPC 1 + +#undef TARGET_VERSION +#define TARGET_VERSION fprintf (stderr, " (Darwin/PowerPC)"); + +/* The "Darwin ABI" is mostly like AIX, but with some key differences. */ + +#define DEFAULT_ABI ABI_DARWIN + +#ifdef IN_LIBGCC2 +#undef TARGET_64BIT +#ifdef __powerpc64__ +#define TARGET_64BIT 1 +#else +#define TARGET_64BIT 0 +#endif +#endif + +/* The object file format is Mach-O. */ + +#define TARGET_OBJECT_FORMAT OBJECT_MACHO + +/* Size of the Obj-C jump buffer. */ +#define OBJC_JBLEN ((TARGET_64BIT) ? (26*2 + 18*2 + 129 + 1) : (26 + 18*2 + 129 + 1)) + +/* We're not ever going to do TOCs. */ + +#define TARGET_TOC 0 +#define TARGET_NO_TOC 1 + +/* Override the default rs6000 definition. */ +#undef PTRDIFF_TYPE +#define PTRDIFF_TYPE (TARGET_64BIT ? "long int" : "int") + +#define TARGET_OS_CPP_BUILTINS() \ + do \ + { \ + if (!TARGET_64BIT) builtin_define ("__ppc__"); \ + if (TARGET_64BIT) builtin_define ("__ppc64__"); \ + builtin_define ("__POWERPC__"); \ + builtin_define ("__NATURAL_ALIGNMENT__"); \ + darwin_cpp_builtins (pfile); \ + } \ + while (0) + +/* Generate branch islands stubs if this is true. */ +extern int darwin_emit_branch_islands; + +#define SUBTARGET_OVERRIDE_OPTIONS darwin_rs6000_override_options () + +#define C_COMMON_OVERRIDE_OPTIONS do { \ + /* On powerpc, __cxa_get_exception_ptr is available starting in the \ + 10.4.6 libstdc++.dylib. 
*/ \ + if (strverscmp (darwin_macosx_version_min, "10.4.6") < 0 \ + && flag_use_cxa_get_exception_ptr == 2) \ + flag_use_cxa_get_exception_ptr = 0; \ + if (flag_mkernel) \ + flag_no_builtin = 1; \ + SUBTARGET_C_COMMON_OVERRIDE_OPTIONS; \ +} while (0) + +/* Darwin has 128-bit long double support in libc in 10.4 and later. + Default to 128-bit long doubles even on earlier platforms for ABI + consistency; arithmetic will work even if libc and libm support is + not available. */ + +#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 128 + + +/* We want -fPIC by default, unless we're using -static to compile for + the kernel or some such. The "-faltivec" option should have been + called "-maltivec" all along. */ + +#define CC1_SPEC "\ + %(cc1_cpu) \ + %{g: %{!fno-eliminate-unused-debug-symbols: -feliminate-unused-debug-symbols }} \ + %{static: %{Zdynamic: %e conflicting code gen style switches are used}}\ + %{!mmacosx-version-min=*:-mmacosx-version-min=%(darwin_minversion)} \ + %{!mkernel:%{!static:%{!mdynamic-no-pic:-fPIC}}} \ + %{faltivec:-maltivec -include altivec.h} %{fno-altivec:-mno-altivec} \ + % 10.4 mmacosx-version-min= crt2.o%s)}" + +/* Determine a minimum version based on compiler options. */ +#define DARWIN_MINVERSION_SPEC \ + "%{m64:%{fgnu-runtime:10.4; \ + ,objective-c|,objc-cpp-output:10.5; \ + ,objective-c-header:10.5; \ + ,objective-c++|,objective-c++-cpp-output:10.5; \ + ,objective-c++-header|,objc++-cpp-output:10.5; \ + :10.4}; \ + shared-libgcc:10.3; \ + :10.1}" + +#undef SUBTARGET_EXTRA_SPECS +#define SUBTARGET_EXTRA_SPECS \ + DARWIN_EXTRA_SPECS \ + { "darwin_arch", DARWIN_ARCH_SPEC }, \ + { "darwin_crt2", DARWIN_CRT2_SPEC }, \ + { "darwin_subarch", DARWIN_SUBARCH_SPEC }, + +/* Output a .machine directive. */ +#undef TARGET_ASM_FILE_START +#define TARGET_ASM_FILE_START rs6000_darwin_file_start + +/* Make both r2 and r13 available for allocation. */ +#define FIXED_R2 0 +#define FIXED_R13 0 + +/* Base register for access to local variables of the function. 
*/ + +#undef HARD_FRAME_POINTER_REGNUM +#define HARD_FRAME_POINTER_REGNUM 30 + +#undef RS6000_PIC_OFFSET_TABLE_REGNUM +#define RS6000_PIC_OFFSET_TABLE_REGNUM 31 + +/* Pad the outgoing args area to 16 bytes instead of the usual 8. */ + +#undef STARTING_FRAME_OFFSET +#define STARTING_FRAME_OFFSET \ + (FRAME_GROWS_DOWNWARD \ + ? 0 \ + : (RS6000_ALIGN (crtl->outgoing_args_size, 16) \ + + RS6000_SAVE_AREA)) + +#undef STACK_DYNAMIC_OFFSET +#define STACK_DYNAMIC_OFFSET(FUNDECL) \ + (RS6000_ALIGN (crtl->outgoing_args_size, 16) \ + + (STACK_POINTER_OFFSET)) + +/* Define cutoff for using external functions to save floating point. + Currently on Darwin, always use inline stores. */ + +#undef FP_SAVE_INLINE +#define FP_SAVE_INLINE(FIRST_REG) ((FIRST_REG) < 64) +#undef GP_SAVE_INLINE +#define GP_SAVE_INLINE(FIRST_REG) ((FIRST_REG) < 32) + +/* Darwin uses a function call if everything needs to be saved/restored. */ +#undef WORLD_SAVE_P +#define WORLD_SAVE_P(INFO) ((INFO)->world_save_p) + +/* The assembler wants the alternate register names, but without + leading percent sign. 
*/ +#undef REGISTER_NAMES +#define REGISTER_NAMES \ +{ \ + "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \ + "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", \ + "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", \ + "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", \ + "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \ + "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", \ + "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", \ + "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31", \ + "mq", "lr", "ctr", "ap", \ + "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \ + "xer", \ + "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", \ + "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", \ + "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", \ + "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", \ + "vrsave", "vscr", \ + "spe_acc", "spefscr", \ + "sfp" \ +} + +/* This outputs NAME to FILE. */ + +#undef RS6000_OUTPUT_BASENAME +#define RS6000_OUTPUT_BASENAME(FILE, NAME) \ + assemble_name (FILE, NAME) + +/* Globalizing directive for a label. */ +#undef GLOBAL_ASM_OP +#define GLOBAL_ASM_OP "\t.globl " +#undef TARGET_ASM_GLOBALIZE_LABEL + +/* This is how to output an internal label prefix. rs6000.c uses this + when generating traceback tables. */ +/* Not really used for Darwin? */ + +#undef ASM_OUTPUT_INTERNAL_LABEL_PREFIX +#define ASM_OUTPUT_INTERNAL_LABEL_PREFIX(FILE,PREFIX) \ + fprintf (FILE, "%s", PREFIX) + +/* Override the standard rs6000 definition. */ + +#undef ASM_COMMENT_START +#define ASM_COMMENT_START ";" + +/* FP save and restore routines. */ +#define SAVE_FP_PREFIX "._savef" +#define SAVE_FP_SUFFIX "" +#define RESTORE_FP_PREFIX "._restf" +#define RESTORE_FP_SUFFIX "" + +/* This is how to output an assembler line that says to advance + the location counter to a multiple of 2**LOG bytes using the + "nop" instruction as padding. 
*/ + +#define ASM_OUTPUT_ALIGN_WITH_NOP(FILE,LOG) \ + do \ + { \ + if ((LOG) < 3) \ + { \ + ASM_OUTPUT_ALIGN (FILE,LOG); \ + } \ + else /* nop == ori r0,r0,0 */ \ + fprintf (FILE, "\t.align32 %d,0x60000000\n", (LOG)); \ + } while (0) + +#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN +/* This is supported in cctools 465 and later. The macro test + above prevents using it in earlier build environments. */ +#define ASM_OUTPUT_MAX_SKIP_ALIGN(FILE,LOG,MAX_SKIP) \ + if ((LOG) != 0) \ + { \ + if ((MAX_SKIP) == 0) \ + fprintf ((FILE), "\t.p2align %d\n", (LOG)); \ + else \ + fprintf ((FILE), "\t.p2align %d,,%d\n", (LOG), (MAX_SKIP)); \ + } +#endif + +/* Generate insns to call the profiler. */ + +#define PROFILE_HOOK(LABEL) output_profile_hook (LABEL) + +/* Function name to call to do profiling. */ + +#define RS6000_MCOUNT "*mcount" + +/* Default processor: G4, and G5 for 64-bit. */ + +#undef PROCESSOR_DEFAULT +#define PROCESSOR_DEFAULT PROCESSOR_PPC7400 +#undef PROCESSOR_DEFAULT64 +#define PROCESSOR_DEFAULT64 PROCESSOR_POWER4 + +/* Default target flag settings. Despite the fact that STMW/LMW + serializes, it's still a big code size win to use them. Use FSEL by + default as well. */ + +#undef TARGET_DEFAULT +#define TARGET_DEFAULT (MASK_POWERPC | MASK_MULTIPLE | MASK_NEW_MNEMONICS \ + | MASK_PPC_GFXOPT) + +/* Darwin only runs on PowerPC, so short-circuit POWER patterns. */ +#undef TARGET_POWER +#define TARGET_POWER 0 +#undef TARGET_IEEEQUAD +#define TARGET_IEEEQUAD 0 + +/* Since Darwin doesn't do TOCs, stub this out. */ + +#define ASM_OUTPUT_SPECIAL_POOL_ENTRY_P(X, MODE) ((void)X, (void)MODE, 0) + +/* Unlike most other PowerPC targets, chars are signed, for + consistency with other Darwin architectures. */ + +#undef DEFAULT_SIGNED_CHAR +#define DEFAULT_SIGNED_CHAR (1) + +/* Given an rtx X being reloaded into a reg required to be + in class CLASS, return the class of reg to actually use. 
+ In general this is just CLASS; but on some machines + in some cases it is preferable to use a more restrictive class. + + On the RS/6000, we have to return NO_REGS when we want to reload a + floating-point CONST_DOUBLE to force it to be copied to memory. + + Don't allow R0 when loading the address of, or otherwise furtling with, + a SYMBOL_REF. */ + +#undef PREFERRED_RELOAD_CLASS +#define PREFERRED_RELOAD_CLASS(X,CLASS) \ + ((CONSTANT_P (X) \ + && reg_classes_intersect_p ((CLASS), FLOAT_REGS)) \ + ? NO_REGS \ + : ((GET_CODE (X) == SYMBOL_REF || GET_CODE (X) == HIGH) \ + && reg_class_subset_p (BASE_REGS, (CLASS))) \ + ? BASE_REGS \ + : (GET_MODE_CLASS (GET_MODE (X)) == MODE_INT \ + && (CLASS) == NON_SPECIAL_REGS) \ + ? GENERAL_REGS \ + : (CLASS)) + +/* Compute field alignment. This is similar to the version of the + macro in the Apple version of GCC, except that version supports + 'mac68k' alignment, and that version uses the computed alignment + always for the first field of a structure. The first-field + behavior is dealt with by + darwin_rs6000_special_round_type_align. */ +#define ADJUST_FIELD_ALIGN(FIELD, COMPUTED) \ + (TARGET_ALIGN_NATURAL ? (COMPUTED) \ + : (COMPUTED) == 128 ? 128 \ + : MIN ((COMPUTED), 32)) + +/* Darwin increases natural record alignment to doubleword if the first + field is an FP double while the FP fields remain word aligned. */ +#define ROUND_TYPE_ALIGN(STRUCT, COMPUTED, SPECIFIED) \ + ((TREE_CODE (STRUCT) == RECORD_TYPE \ + || TREE_CODE (STRUCT) == UNION_TYPE \ + || TREE_CODE (STRUCT) == QUAL_UNION_TYPE) \ + && TARGET_ALIGN_NATURAL == 0 \ + ? darwin_rs6000_special_round_type_align (STRUCT, COMPUTED, SPECIFIED) \ + : (TREE_CODE (STRUCT) == VECTOR_TYPE \ + && ALTIVEC_VECTOR_MODE (TYPE_MODE (STRUCT))) \ + ? MAX (MAX ((COMPUTED), (SPECIFIED)), 128) \ + : MAX ((COMPUTED), (SPECIFIED))) + +/* Specify padding for the last element of a block move between + registers and memory. FIRST is nonzero if this is the only + element. 
*/ +#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \ + (!(FIRST) ? upward : FUNCTION_ARG_PADDING (MODE, TYPE)) + +#define DOUBLE_INT_ASM_OP "\t.quad\t" + +/* For binary compatibility with 2.95; Darwin C APIs use bool from + stdbool.h, which was an int-sized enum in 2.95. Users can explicitly + choose to have sizeof(bool)==1 with the -mone-byte-bool switch. */ +#define BOOL_TYPE_SIZE (darwin_one_byte_bool ? CHAR_TYPE_SIZE : INT_TYPE_SIZE) + +#undef REGISTER_TARGET_PRAGMAS +#define REGISTER_TARGET_PRAGMAS() \ + do \ + { \ + DARWIN_REGISTER_TARGET_PRAGMAS(); \ + targetm.resolve_overloaded_builtin = altivec_resolve_overloaded_builtin; \ + } \ + while (0) + +#ifdef IN_LIBGCC2 +#include +#endif + +#if !defined(__LP64__) && !defined(DARWIN_LIBSYSTEM_HAS_UNWIND) +#define MD_UNWIND_SUPPORT "config/rs6000/darwin-unwind.h" +#endif + +/* True, iff we're generating fast turn around debugging code. When + true, we arrange for function prologues to start with 5 nops so + that gdb may insert code to redirect them, and for data to be + accessed indirectly. The runtime uses this indirection to forward + references for data to the original instance of that data. */ + +#define TARGET_FIX_AND_CONTINUE (darwin_fix_and_continue) + +/* This is the reserved direct dispatch address for Objective-C. */ +#define OFFS_MSGSEND_FAST 0xFFFEFF00 + +/* This is the reserved ivar address Objective-C. */ +#define OFFS_ASSIGNIVAR_FAST 0xFFFEFEC0 + +/* Old versions of Mac OS/Darwin don't have C99 functions available. */ +#undef TARGET_C99_FUNCTIONS +#define TARGET_C99_FUNCTIONS \ + (TARGET_64BIT \ + || strverscmp (darwin_macosx_version_min, "10.3") >= 0) + +/* When generating kernel code or kexts, we don't use Altivec by + default, as kernel code doesn't save/restore those registers. */ +#define OS_MISSING_ALTIVEC (flag_mkernel || flag_apple_kext) + +/* Darwin has support for section anchors on powerpc*. 
+ It is disabled for any section containing a "zero-sized item" (because these + are re-written as size=1 to be compatible with the OSX ld64). + The re-writing would interfere with the computation of anchor offsets. + Therefore, we place zero-sized items in their own sections and make such + sections unavailable to section anchoring. */ + +#undef TARGET_ASM_OUTPUT_ANCHOR +#define TARGET_ASM_OUTPUT_ANCHOR darwin_asm_output_anchor + +#undef TARGET_USE_ANCHORS_FOR_SYMBOL_P +#define TARGET_USE_ANCHORS_FOR_SYMBOL_P darwin_use_anchors_for_symbol_p + +#undef DARWIN_SECTION_ANCHORS +#define DARWIN_SECTION_ANCHORS 1 + +/* PPC Darwin has to rename some of the long double builtins. */ +#undef SUBTARGET_INIT_BUILTINS +#define SUBTARGET_INIT_BUILTINS \ +do { \ + darwin_patch_builtins (); \ + rs6000_builtin_decls[(unsigned) (RS6000_BUILTIN_CFSTRING)] \ + = darwin_init_cfstring_builtins ((unsigned) (RS6000_BUILTIN_CFSTRING)); \ +} while(0) diff --git a/gcc/config/rs6000/darwin.md b/gcc/config/rs6000/darwin.md new file mode 100644 index 000000000..6b1927779 --- /dev/null +++ b/gcc/config/rs6000/darwin.md @@ -0,0 +1,442 @@ +/* Machine description patterns for PowerPC running Darwin (Mac OS X). + Copyright (C) 2004, 2005, 2007, 2010 Free Software Foundation, Inc. + Contributed by Apple Computer Inc. + +This file is part of GCC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . 
*/ + +(define_insn "adddi3_high" + [(set (match_operand:DI 0 "gpc_reg_operand" "=b") + (plus:DI (match_operand:DI 1 "gpc_reg_operand" "b") + (high:DI (match_operand 2 "" ""))))] + "TARGET_MACHO && TARGET_64BIT" + "{cau|addis} %0,%1,ha16(%2)" + [(set_attr "length" "4")]) + +(define_insn "movdf_low_si" + [(set (match_operand:DF 0 "gpc_reg_operand" "=f,!r") + (mem:DF (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b,b") + (match_operand 2 "" ""))))] + "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && !TARGET_64BIT" + "* +{ + switch (which_alternative) + { + case 0: + return \"lfd %0,lo16(%2)(%1)\"; + case 1: + { + if (TARGET_POWERPC64 && TARGET_32BIT) + /* Note, old assemblers didn't support relocation here. */ + return \"ld %0,lo16(%2)(%1)\"; + else + { + output_asm_insn (\"{cal|la} %0,lo16(%2)(%1)\", operands); + output_asm_insn (\"{l|lwz} %L0,4(%0)\", operands); + return (\"{l|lwz} %0,0(%0)\"); + } + } + default: + gcc_unreachable (); + } +}" + [(set_attr "type" "load") + (set_attr "length" "4,12")]) + + +(define_insn "movdf_low_di" + [(set (match_operand:DF 0 "gpc_reg_operand" "=f,!r") + (mem:DF (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b,b") + (match_operand 2 "" ""))))] + "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_64BIT" + "* +{ + switch (which_alternative) + { + case 0: + return \"lfd %0,lo16(%2)(%1)\"; + case 1: + return \"ld %0,lo16(%2)(%1)\"; + default: + gcc_unreachable (); + } +}" + [(set_attr "type" "load") + (set_attr "length" "4,4")]) + +(define_insn "movdf_low_st_si" + [(set (mem:DF (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b") + (match_operand 2 "" ""))) + (match_operand:DF 0 "gpc_reg_operand" "f"))] + "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && ! 
TARGET_64BIT" + "stfd %0,lo16(%2)(%1)" + [(set_attr "type" "store") + (set_attr "length" "4")]) + +(define_insn "movdf_low_st_di" + [(set (mem:DF (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b") + (match_operand 2 "" ""))) + (match_operand:DF 0 "gpc_reg_operand" "f"))] + "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_64BIT" + "stfd %0,lo16(%2)(%1)" + [(set_attr "type" "store") + (set_attr "length" "4")]) + +(define_insn "movsf_low_si" + [(set (match_operand:SF 0 "gpc_reg_operand" "=f,!r") + (mem:SF (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b,b") + (match_operand 2 "" ""))))] + "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && ! TARGET_64BIT" + "@ + lfs %0,lo16(%2)(%1) + {l|lwz} %0,lo16(%2)(%1)" + [(set_attr "type" "load") + (set_attr "length" "4")]) + +(define_insn "movsf_low_di" + [(set (match_operand:SF 0 "gpc_reg_operand" "=f,!r") + (mem:SF (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b,b") + (match_operand 2 "" ""))))] + "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_64BIT" + "@ + lfs %0,lo16(%2)(%1) + {l|lwz} %0,lo16(%2)(%1)" + [(set_attr "type" "load") + (set_attr "length" "4")]) + +(define_insn "movsf_low_st_si" + [(set (mem:SF (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b,b") + (match_operand 2 "" ""))) + (match_operand:SF 0 "gpc_reg_operand" "f,!r"))] + "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && ! 
TARGET_64BIT" + "@ + stfs %0,lo16(%2)(%1) + {st|stw} %0,lo16(%2)(%1)" + [(set_attr "type" "store") + (set_attr "length" "4")]) + +(define_insn "movsf_low_st_di" + [(set (mem:SF (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b,b") + (match_operand 2 "" ""))) + (match_operand:SF 0 "gpc_reg_operand" "f,!r"))] + "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_64BIT" + "@ + stfs %0,lo16(%2)(%1) + {st|stw} %0,lo16(%2)(%1)" + [(set_attr "type" "store") + (set_attr "length" "4")]) + +;; 64-bit MachO load/store support +(define_insn "movdi_low" + [(set (match_operand:DI 0 "gpc_reg_operand" "=r,*!d") + (mem:DI (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b,b") + (match_operand 2 "" ""))))] + "TARGET_MACHO && TARGET_64BIT" + "@ + {l|ld} %0,lo16(%2)(%1) + lfd %0,lo16(%2)(%1)" + [(set_attr "type" "load") + (set_attr "length" "4")]) + +(define_insn "movsi_low_st" + [(set (mem:SI (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b") + (match_operand 2 "" ""))) + (match_operand:SI 0 "gpc_reg_operand" "r"))] + "TARGET_MACHO && ! TARGET_64BIT" + "{st|stw} %0,lo16(%2)(%1)" + [(set_attr "type" "store") + (set_attr "length" "4")]) + +(define_insn "movdi_low_st" + [(set (mem:DI (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b,b") + (match_operand 2 "" ""))) + (match_operand:DI 0 "gpc_reg_operand" "r,*!d"))] + "TARGET_MACHO && TARGET_64BIT" + "@ + {st|std} %0,lo16(%2)(%1) + stfd %0,lo16(%2)(%1)" + [(set_attr "type" "store") + (set_attr "length" "4")]) + +;; Mach-O PIC trickery. +(define_expand "macho_high" + [(set (match_operand 0 "" "") + (high (match_operand 1 "" "")))] + "TARGET_MACHO" +{ + if (TARGET_64BIT) + emit_insn (gen_macho_high_di (operands[0], operands[1])); + else + emit_insn (gen_macho_high_si (operands[0], operands[1])); + + DONE; +}) + +(define_insn "macho_high_si" + [(set (match_operand:SI 0 "gpc_reg_operand" "=b*r") + (high:SI (match_operand 1 "" "")))] + "TARGET_MACHO && ! 
TARGET_64BIT" + "{liu|lis} %0,ha16(%1)") + + +(define_insn "macho_high_di" + [(set (match_operand:DI 0 "gpc_reg_operand" "=b*r") + (high:DI (match_operand 1 "" "")))] + "TARGET_MACHO && TARGET_64BIT" + "{liu|lis} %0,ha16(%1)") + +(define_expand "macho_low" + [(set (match_operand 0 "" "") + (lo_sum (match_operand 1 "" "") + (match_operand 2 "" "")))] + "TARGET_MACHO" +{ + if (TARGET_64BIT) + emit_insn (gen_macho_low_di (operands[0], operands[1], operands[2])); + else + emit_insn (gen_macho_low_si (operands[0], operands[1], operands[2])); + + DONE; +}) + +(define_insn "macho_low_si" + [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r") + (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b,!*r") + (match_operand 2 "" "")))] + "TARGET_MACHO && ! TARGET_64BIT" + "@ + {cal %0,%a2@l(%1)|la %0,lo16(%2)(%1)} + {cal %0,%a2@l(%1)|addic %0,%1,lo16(%2)}") + +(define_insn "macho_low_di" + [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r") + (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b,!*r") + (match_operand 2 "" "")))] + "TARGET_MACHO && TARGET_64BIT" + "@ + {cal %0,%a2@l(%1)|la %0,lo16(%2)(%1)} + {cal %0,%a2@l(%1)|addic %0,%1,lo16(%2)}") + +(define_split + [(set (mem:V4SI (plus:DI (match_operand:DI 0 "gpc_reg_operand" "") + (match_operand:DI 1 "short_cint_operand" ""))) + (match_operand:V4SI 2 "register_operand" "")) + (clobber (match_operand:DI 3 "gpc_reg_operand" ""))] + "TARGET_MACHO && TARGET_64BIT" + [(set (match_dup 3) (plus:DI (match_dup 0) (match_dup 1))) + (set (mem:V4SI (match_dup 3)) + (match_dup 2))] + "") + +(define_expand "load_macho_picbase" + [(set (reg:SI 65) + (unspec [(match_operand 0 "" "")] + UNSPEC_LD_MPIC))] + "(DEFAULT_ABI == ABI_DARWIN) && flag_pic" +{ + if (TARGET_32BIT) + emit_insn (gen_load_macho_picbase_si (operands[0])); + else + emit_insn (gen_load_macho_picbase_di (operands[0])); + + DONE; +}) + +(define_insn "load_macho_picbase_si" + [(set (reg:SI 65) + (unspec:SI [(match_operand:SI 0 "immediate_operand" "s") + (pc)] UNSPEC_LD_MPIC))] + 
"(DEFAULT_ABI == ABI_DARWIN) && flag_pic" + "bcl 20,31,%0\\n%0:" + [(set_attr "type" "branch") + (set_attr "length" "4")]) + +(define_insn "load_macho_picbase_di" + [(set (reg:DI 65) + (unspec:DI [(match_operand:DI 0 "immediate_operand" "s") + (pc)] UNSPEC_LD_MPIC))] + "(DEFAULT_ABI == ABI_DARWIN) && flag_pic && TARGET_64BIT" + "bcl 20,31,%0\\n%0:" + [(set_attr "type" "branch") + (set_attr "length" "4")]) + +(define_expand "macho_correct_pic" + [(set (match_operand 0 "" "") + (plus (match_operand 1 "" "") + (unspec [(match_operand 2 "" "") + (match_operand 3 "" "")] + UNSPEC_MPIC_CORRECT)))] + "DEFAULT_ABI == ABI_DARWIN" +{ + if (TARGET_32BIT) + emit_insn (gen_macho_correct_pic_si (operands[0], operands[1], operands[2], + operands[3])); + else + emit_insn (gen_macho_correct_pic_di (operands[0], operands[1], operands[2], + operands[3])); + + DONE; +}) + +(define_insn "macho_correct_pic_si" + [(set (match_operand:SI 0 "gpc_reg_operand" "=r") + (plus:SI (match_operand:SI 1 "gpc_reg_operand" "r") + (unspec:SI [(match_operand:SI 2 "immediate_operand" "s") + (match_operand:SI 3 "immediate_operand" "s")] + UNSPEC_MPIC_CORRECT)))] + "DEFAULT_ABI == ABI_DARWIN" + "addis %0,%1,ha16(%2-%3)\n\taddi %0,%0,lo16(%2-%3)" + [(set_attr "length" "8")]) + +(define_insn "macho_correct_pic_di" + [(set (match_operand:DI 0 "gpc_reg_operand" "=r") + (plus:DI (match_operand:DI 1 "gpc_reg_operand" "r") + (unspec:DI [(match_operand:DI 2 "immediate_operand" "s") + (match_operand:DI 3 "immediate_operand" "s")] + 16)))] + "DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT" + "addis %0,%1,ha16(%2-%3)\n\taddi %0,%0,lo16(%2-%3)" + [(set_attr "length" "8")]) + +(define_insn "*call_indirect_nonlocal_darwin64" + [(call (mem:SI (match_operand:DI 0 "register_operand" "c,*l,c,*l")) + (match_operand 1 "" "g,g,g,g")) + (use (match_operand:SI 2 "immediate_operand" "O,O,n,n")) + (clobber (reg:SI 65))] + "DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT" +{ + return "b%T0l"; +} + [(set_attr "type" 
"jmpreg,jmpreg,jmpreg,jmpreg") + (set_attr "length" "4,4,8,8")]) + +(define_insn "*call_nonlocal_darwin64" + [(call (mem:SI (match_operand:DI 0 "symbol_ref_operand" "s,s")) + (match_operand 1 "" "g,g")) + (use (match_operand:SI 2 "immediate_operand" "O,n")) + (clobber (reg:SI 65))] + "(DEFAULT_ABI == ABI_DARWIN) + && (INTVAL (operands[2]) & CALL_LONG) == 0" +{ +#if TARGET_MACHO + return output_call(insn, operands, 0, 2); +#else + gcc_unreachable (); +#endif +} + [(set_attr "type" "branch,branch") + (set_attr "length" "4,8")]) + +(define_insn "*call_value_indirect_nonlocal_darwin64" + [(set (match_operand 0 "" "") + (call (mem:SI (match_operand:DI 1 "register_operand" "c,*l,c,*l")) + (match_operand 2 "" "g,g,g,g"))) + (use (match_operand:SI 3 "immediate_operand" "O,O,n,n")) + (clobber (reg:SI 65))] + "DEFAULT_ABI == ABI_DARWIN" +{ + return "b%T1l"; +} + [(set_attr "type" "jmpreg,jmpreg,jmpreg,jmpreg") + (set_attr "length" "4,4,8,8")]) + +(define_insn "*call_value_nonlocal_darwin64" + [(set (match_operand 0 "" "") + (call (mem:SI (match_operand:DI 1 "symbol_ref_operand" "s,s")) + (match_operand 2 "" "g,g"))) + (use (match_operand:SI 3 "immediate_operand" "O,n")) + (clobber (reg:SI 65))] + "(DEFAULT_ABI == ABI_DARWIN) + && (INTVAL (operands[3]) & CALL_LONG) == 0" +{ +#if TARGET_MACHO + return output_call(insn, operands, 1, 3); +#else + gcc_unreachable (); +#endif +} + [(set_attr "type" "branch,branch") + (set_attr "length" "4,8")]) + +(define_insn "*sibcall_nonlocal_darwin64" + [(call (mem:SI (match_operand:DI 0 "symbol_ref_operand" "s,s")) + (match_operand 1 "" "")) + (use (match_operand 2 "immediate_operand" "O,n")) + (use (reg:SI 65)) + (return)] + "(DEFAULT_ABI == ABI_DARWIN) + && (INTVAL (operands[2]) & CALL_LONG) == 0" +{ + return "b %z0"; +} + [(set_attr "type" "branch,branch") + (set_attr "length" "4,8")]) + +(define_insn "*sibcall_value_nonlocal_darwin64" + [(set (match_operand 0 "" "") + (call (mem:SI (match_operand:DI 1 "symbol_ref_operand" "s,s")) + 
(match_operand 2 "" ""))) + (use (match_operand:SI 3 "immediate_operand" "O,n")) + (use (reg:SI 65)) + (return)] + "(DEFAULT_ABI == ABI_DARWIN) + && (INTVAL (operands[3]) & CALL_LONG) == 0" + "* +{ + return \"b %z1\"; +}" + [(set_attr "type" "branch,branch") + (set_attr "length" "4,8")]) + + +(define_insn "*sibcall_symbolic_64" + [(call (mem:SI (match_operand:DI 0 "call_operand" "s,c")) ; 64 + (match_operand 1 "" "")) + (use (match_operand 2 "" "")) + (use (reg:SI 65)) + (return)] + "TARGET_64BIT && DEFAULT_ABI == ABI_DARWIN" + "* +{ + switch (which_alternative) + { + case 0: return \"b %z0\"; + case 1: return \"b%T0\"; + default: gcc_unreachable (); + } +}" + [(set_attr "type" "branch") + (set_attr "length" "4")]) + +(define_insn "*sibcall_value_symbolic_64" + [(set (match_operand 0 "" "") + (call (mem:SI (match_operand:DI 1 "call_operand" "s,c")) + (match_operand 2 "" ""))) + (use (match_operand:SI 3 "" "")) + (use (reg:SI 65)) + (return)] + "TARGET_64BIT && DEFAULT_ABI == ABI_DARWIN" + "* +{ + switch (which_alternative) + { + case 0: return \"b %z1\"; + case 1: return \"b%T1\"; + default: gcc_unreachable (); + } +}" + [(set_attr "type" "branch") + (set_attr "length" "4")]) diff --git a/gcc/config/rs6000/darwin.opt b/gcc/config/rs6000/darwin.opt new file mode 100644 index 000000000..3787511b6 --- /dev/null +++ b/gcc/config/rs6000/darwin.opt @@ -0,0 +1,42 @@ +; Darwin options for PPC port. +; +; Copyright (C) 2005, 2007, 2010 Free Software Foundation, Inc. +; Contributed by Aldy Hernandez . +; +; This file is part of GCC. +; +; GCC is free software; you can redistribute it and/or modify it under +; the terms of the GNU General Public License as published by the Free +; Software Foundation; either version 3, or (at your option) any later +; version. +; +; GCC is distributed in the hope that it will be useful, but WITHOUT +; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +; or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public +; License for more details. +; +; You should have received a copy of the GNU General Public License +; along with GCC; see the file COPYING3. If not see +; . + +Waltivec-long-deprecated +Driver Alias(mwarn-altivec-long) + +faltivec +Driver + +; -ffix-and-continue and -findirect-data are for compatibility for old +; compilers. +ffix-and-continue +Driver RejectNegative Alias(mfix-and-continue) + +findirect-data +Driver RejectNegative Alias(mfix-and-continue) + +m64 +Target RejectNegative Negative(m32) Mask(64BIT) +Generate 64-bit code + +m32 +Target RejectNegative Negative(m64) InverseMask(64BIT) +Generate 32-bit code diff --git a/gcc/config/rs6000/darwin64.h b/gcc/config/rs6000/darwin64.h new file mode 100644 index 000000000..a74ceb17f --- /dev/null +++ b/gcc/config/rs6000/darwin64.h @@ -0,0 +1,35 @@ +/* Target definitions for PowerPC running Darwin (Mac OS X). + Copyright (C) 2006, 2007, 2010 Free Software Foundation, Inc. + Contributed by Apple Computer Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . 
*/ + +#undef TARGET_VERSION +#define TARGET_VERSION fprintf (stderr, " (Darwin/PowerPC64)"); + +#undef TARGET_DEFAULT +#define TARGET_DEFAULT (MASK_POWERPC | MASK_POWERPC64 | MASK_64BIT \ + | MASK_MULTIPLE | MASK_NEW_MNEMONICS | MASK_PPC_GFXOPT) + +#undef DARWIN_ARCH_SPEC +#define DARWIN_ARCH_SPEC "%{m32:ppc;:ppc64}" + +#undef DARWIN_SUBARCH_SPEC +#define DARWIN_SUBARCH_SPEC DARWIN_ARCH_SPEC + +#undef DARWIN_CRT2_SPEC +#define DARWIN_CRT2_SPEC "" diff --git a/gcc/config/rs6000/darwin7.h b/gcc/config/rs6000/darwin7.h new file mode 100644 index 000000000..fdf371666 --- /dev/null +++ b/gcc/config/rs6000/darwin7.h @@ -0,0 +1,30 @@ +/* Target definitions for Darwin 7.x (Mac OS X) systems. + Copyright (C) 2004, 2005, 2007 + Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +/* Machine dependent libraries. Include libmx when compiling for + Darwin 7.0 and above, but before libSystem, since the functions are + actually in libSystem but for 7.x compatibility we want them to be + looked for in libmx first. Include libmx by default because otherwise + libstdc++ isn't usable. 
*/ + +#undef LIB_SPEC +#define LIB_SPEC "%{!static:\ + %:version-compare(!< 10.3 mmacosx-version-min= -lmx)\ + -lSystem}" diff --git a/gcc/config/rs6000/darwin8.h b/gcc/config/rs6000/darwin8.h new file mode 100644 index 000000000..7cdd81db7 --- /dev/null +++ b/gcc/config/rs6000/darwin8.h @@ -0,0 +1,32 @@ +/* Target definitions for Darwin 8.0 and above (Mac OS X) systems. + Copyright (C) 2004, 2005, 2007 + Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +/* Machine dependent libraries. Include libmx when compiling on + Darwin 7.0 and above, but before libSystem, since the functions are + actually in libSystem but for 7.x compatibility we want them to be + looked for in libmx first---but only do this if 7.x compatibility + is a concern, which it's not in 64-bit mode. Include + libSystemStubs when compiling on (not necessarily for) 8.0 and + above and not 64-bit long double. */ + +#undef LIB_SPEC +#define LIB_SPEC "%{!static:\ + %{!mlong-double-64:%{pg:-lSystemStubs_profile;:-lSystemStubs}} \ + %{!m64:%:version-compare(>< 10.3 10.4 mmacosx-version-min= -lmx)} -lSystem}" diff --git a/gcc/config/rs6000/default64.h b/gcc/config/rs6000/default64.h new file mode 100644 index 000000000..0ff49aab9 --- /dev/null +++ b/gcc/config/rs6000/default64.h @@ -0,0 +1,24 @@ +/* Definitions of target machine for GNU compiler, + for 64 bit powerpc linux defaulting to -m64. 
+ Copyright (C) 2003, 2005, 2007 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#undef TARGET_DEFAULT +#define TARGET_DEFAULT \ + (MASK_POWERPC | MASK_PPC_GFXOPT | \ + MASK_POWERPC64 | MASK_64BIT | MASK_NEW_MNEMONICS) diff --git a/gcc/config/rs6000/dfp.md b/gcc/config/rs6000/dfp.md new file mode 100644 index 000000000..5ffe7fcff --- /dev/null +++ b/gcc/config/rs6000/dfp.md @@ -0,0 +1,594 @@ +;; Decimal Floating Point (DFP) patterns. +;; Copyright (C) 2007, 2008, 2010 +;; Free Software Foundation, Inc. +;; Contributed by Ben Elliston (bje@au.ibm.com) and Peter Bergner +;; (bergner@vnet.ibm.com). + +;; This file is part of GCC. + +;; GCC is free software; you can redistribute it and/or modify it +;; under the terms of the GNU General Public License as published +;; by the Free Software Foundation; either version 3, or (at your +;; option) any later version. + +;; GCC is distributed in the hope that it will be useful, but WITHOUT +;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +;; License for more details. + +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . 
+ +;; +;; UNSPEC usage +;; + +(define_constants + [(UNSPEC_MOVSD_LOAD 400) + (UNSPEC_MOVSD_STORE 401) + ]) + + +(define_expand "movsd" + [(set (match_operand:SD 0 "nonimmediate_operand" "") + (match_operand:SD 1 "any_operand" ""))] + "TARGET_HARD_FLOAT && TARGET_FPRS" + "{ rs6000_emit_move (operands[0], operands[1], SDmode); DONE; }") + +(define_split + [(set (match_operand:SD 0 "gpc_reg_operand" "") + (match_operand:SD 1 "const_double_operand" ""))] + "reload_completed + && ((GET_CODE (operands[0]) == REG && REGNO (operands[0]) <= 31) + || (GET_CODE (operands[0]) == SUBREG + && GET_CODE (SUBREG_REG (operands[0])) == REG + && REGNO (SUBREG_REG (operands[0])) <= 31))" + [(set (match_dup 2) (match_dup 3))] + " +{ + long l; + REAL_VALUE_TYPE rv; + + REAL_VALUE_FROM_CONST_DOUBLE (rv, operands[1]); + REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l); + + if (! TARGET_POWERPC64) + operands[2] = operand_subword (operands[0], 0, 0, SDmode); + else + operands[2] = gen_lowpart (SImode, operands[0]); + + operands[3] = gen_int_mode (l, SImode); +}") + +(define_insn "movsd_hardfloat" + [(set (match_operand:SD 0 "nonimmediate_operand" "=r,r,m,f,*c*l,*q,!r,*h,!r,!r") + (match_operand:SD 1 "input_operand" "r,m,r,f,r,r,h,0,G,Fn"))] + "(gpc_reg_operand (operands[0], SDmode) + || gpc_reg_operand (operands[1], SDmode)) + && (TARGET_HARD_FLOAT && TARGET_FPRS)" + "@ + mr %0,%1 + {l%U1%X1|lwz%U1%X1} %0,%1 + {st%U0%X0|stw%U0%X0} %1,%0 + fmr %0,%1 + mt%0 %1 + mt%0 %1 + mf%1 %0 + {cror 0,0,0|nop} + # + #" + [(set_attr "type" "*,load,store,fp,mtjmpr,*,mfjmpr,*,*,*") + (set_attr "length" "4,4,4,4,4,4,4,4,4,8")]) + +(define_insn "movsd_softfloat" + [(set (match_operand:SD 0 "nonimmediate_operand" "=r,cl,q,r,r,m,r,r,r,r,r,*h") + (match_operand:SD 1 "input_operand" "r,r,r,h,m,r,I,L,R,G,Fn,0"))] + "(gpc_reg_operand (operands[0], SDmode) + || gpc_reg_operand (operands[1], SDmode)) + && (TARGET_SOFT_FLOAT || !TARGET_FPRS)" + "@ + mr %0,%1 + mt%0 %1 + mt%0 %1 + mf%1 %0 + {l%U1%X1|lwz%U1%X1} %0,%1 + 
{st%U0%X0|stw%U0%X0} %1,%0 + {lil|li} %0,%1 + {liu|lis} %0,%v1 + {cal|la} %0,%a1 + # + # + {cror 0,0,0|nop}" + [(set_attr "type" "*,mtjmpr,*,mfjmpr,load,store,*,*,*,*,*,*") + (set_attr "length" "4,4,4,4,4,4,4,4,4,4,8,4")]) + +(define_insn "movsd_store" + [(set (match_operand:DD 0 "nonimmediate_operand" "=m") + (unspec:DD [(match_operand:SD 1 "input_operand" "d")] + UNSPEC_MOVSD_STORE))] + "(gpc_reg_operand (operands[0], DDmode) + || gpc_reg_operand (operands[1], SDmode)) + && TARGET_HARD_FLOAT && TARGET_FPRS" + "stfd%U0%X0 %1,%0" + [(set_attr "type" "fpstore") + (set_attr "length" "4")]) + +(define_insn "movsd_load" + [(set (match_operand:SD 0 "nonimmediate_operand" "=f") + (unspec:SD [(match_operand:DD 1 "input_operand" "m")] + UNSPEC_MOVSD_LOAD))] + "(gpc_reg_operand (operands[0], SDmode) + || gpc_reg_operand (operands[1], DDmode)) + && TARGET_HARD_FLOAT && TARGET_FPRS" + "lfd%U1%X1 %0,%1" + [(set_attr "type" "fpload") + (set_attr "length" "4")]) + +;; Hardware support for decimal floating point operations. 
+ +(define_insn "extendsddd2" + [(set (match_operand:DD 0 "gpc_reg_operand" "=d") + (float_extend:DD (match_operand:SD 1 "gpc_reg_operand" "f")))] + "TARGET_DFP" + "dctdp %0,%1" + [(set_attr "type" "fp")]) + +(define_expand "extendsdtd2" + [(set (match_operand:TD 0 "gpc_reg_operand" "=d") + (float_extend:TD (match_operand:SD 1 "gpc_reg_operand" "d")))] + "TARGET_DFP" +{ + rtx tmp = gen_reg_rtx (DDmode); + emit_insn (gen_extendsddd2 (tmp, operands[1])); + emit_insn (gen_extendddtd2 (operands[0], tmp)); + DONE; +}) + +(define_insn "truncddsd2" + [(set (match_operand:SD 0 "gpc_reg_operand" "=f") + (float_truncate:SD (match_operand:DD 1 "gpc_reg_operand" "d")))] + "TARGET_DFP" + "drsp %0,%1" + [(set_attr "type" "fp")]) + +(define_expand "negdd2" + [(set (match_operand:DD 0 "gpc_reg_operand" "") + (neg:DD (match_operand:DD 1 "gpc_reg_operand" "")))] + "TARGET_HARD_FLOAT && TARGET_FPRS" + "") + +(define_insn "*negdd2_fpr" + [(set (match_operand:DD 0 "gpc_reg_operand" "=d") + (neg:DD (match_operand:DD 1 "gpc_reg_operand" "d")))] + "TARGET_HARD_FLOAT && TARGET_FPRS" + "fneg %0,%1" + [(set_attr "type" "fp")]) + +(define_expand "absdd2" + [(set (match_operand:DD 0 "gpc_reg_operand" "") + (abs:DD (match_operand:DD 1 "gpc_reg_operand" "")))] + "TARGET_HARD_FLOAT && TARGET_FPRS" + "") + +(define_insn "*absdd2_fpr" + [(set (match_operand:DD 0 "gpc_reg_operand" "=d") + (abs:DD (match_operand:DD 1 "gpc_reg_operand" "d")))] + "TARGET_HARD_FLOAT && TARGET_FPRS" + "fabs %0,%1" + [(set_attr "type" "fp")]) + +(define_insn "*nabsdd2_fpr" + [(set (match_operand:DD 0 "gpc_reg_operand" "=d") + (neg:DD (abs:DD (match_operand:DD 1 "gpc_reg_operand" "d"))))] + "TARGET_HARD_FLOAT && TARGET_FPRS" + "fnabs %0,%1" + [(set_attr "type" "fp")]) + +(define_expand "movdd" + [(set (match_operand:DD 0 "nonimmediate_operand" "") + (match_operand:DD 1 "any_operand" ""))] + "" + "{ rs6000_emit_move (operands[0], operands[1], DDmode); DONE; }") + +(define_split + [(set (match_operand:DD 0 "gpc_reg_operand" 
"") + (match_operand:DD 1 "const_int_operand" ""))] + "! TARGET_POWERPC64 && reload_completed + && ((GET_CODE (operands[0]) == REG && REGNO (operands[0]) <= 31) + || (GET_CODE (operands[0]) == SUBREG + && GET_CODE (SUBREG_REG (operands[0])) == REG + && REGNO (SUBREG_REG (operands[0])) <= 31))" + [(set (match_dup 2) (match_dup 4)) + (set (match_dup 3) (match_dup 1))] + " +{ + int endian = (WORDS_BIG_ENDIAN == 0); + HOST_WIDE_INT value = INTVAL (operands[1]); + + operands[2] = operand_subword (operands[0], endian, 0, DDmode); + operands[3] = operand_subword (operands[0], 1 - endian, 0, DDmode); +#if HOST_BITS_PER_WIDE_INT == 32 + operands[4] = (value & 0x80000000) ? constm1_rtx : const0_rtx; +#else + operands[4] = GEN_INT (value >> 32); + operands[1] = GEN_INT (((value & 0xffffffff) ^ 0x80000000) - 0x80000000); +#endif +}") + +(define_split + [(set (match_operand:DD 0 "gpc_reg_operand" "") + (match_operand:DD 1 "const_double_operand" ""))] + "! TARGET_POWERPC64 && reload_completed + && ((GET_CODE (operands[0]) == REG && REGNO (operands[0]) <= 31) + || (GET_CODE (operands[0]) == SUBREG + && GET_CODE (SUBREG_REG (operands[0])) == REG + && REGNO (SUBREG_REG (operands[0])) <= 31))" + [(set (match_dup 2) (match_dup 4)) + (set (match_dup 3) (match_dup 5))] + " +{ + int endian = (WORDS_BIG_ENDIAN == 0); + long l[2]; + REAL_VALUE_TYPE rv; + + REAL_VALUE_FROM_CONST_DOUBLE (rv, operands[1]); + REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l); + + operands[2] = operand_subword (operands[0], endian, 0, DDmode); + operands[3] = operand_subword (operands[0], 1 - endian, 0, DDmode); + operands[4] = gen_int_mode (l[endian], SImode); + operands[5] = gen_int_mode (l[1 - endian], SImode); +}") + +(define_split + [(set (match_operand:DD 0 "gpc_reg_operand" "") + (match_operand:DD 1 "const_double_operand" ""))] + "TARGET_POWERPC64 && reload_completed + && ((GET_CODE (operands[0]) == REG && REGNO (operands[0]) <= 31) + || (GET_CODE (operands[0]) == SUBREG + && GET_CODE (SUBREG_REG (operands[0])) == 
REG + && REGNO (SUBREG_REG (operands[0])) <= 31))" + [(set (match_dup 2) (match_dup 3))] + " +{ + int endian = (WORDS_BIG_ENDIAN == 0); + long l[2]; + REAL_VALUE_TYPE rv; +#if HOST_BITS_PER_WIDE_INT >= 64 + HOST_WIDE_INT val; +#endif + + REAL_VALUE_FROM_CONST_DOUBLE (rv, operands[1]); + REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l); + + operands[2] = gen_lowpart (DImode, operands[0]); + /* HIGHPART is lower memory address when WORDS_BIG_ENDIAN. */ +#if HOST_BITS_PER_WIDE_INT >= 64 + val = ((HOST_WIDE_INT)(unsigned long)l[endian] << 32 + | ((HOST_WIDE_INT)(unsigned long)l[1 - endian])); + + operands[3] = gen_int_mode (val, DImode); +#else + operands[3] = immed_double_const (l[1 - endian], l[endian], DImode); +#endif +}") + +;; Don't have reload use general registers to load a constant. First, +;; it might not work if the output operand is the equivalent of +;; a non-offsettable memref, but also it is less efficient than loading +;; the constant into an FP register, since it will probably be used there. +;; The "??" is a kludge until we can figure out a more reasonable way +;; of handling these non-offsettable values. +(define_insn "*movdd_hardfloat32" + [(set (match_operand:DD 0 "nonimmediate_operand" "=!r,??r,m,d,d,m,!r,!r,!r") + (match_operand:DD 1 "input_operand" "r,m,r,d,m,d,G,H,F"))] + "! 
TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS + && (gpc_reg_operand (operands[0], DDmode) + || gpc_reg_operand (operands[1], DDmode))" + "* +{ + switch (which_alternative) + { + default: + gcc_unreachable (); + case 0: + case 1: + case 2: + return \"#\"; + case 3: + return \"fmr %0,%1\"; + case 4: + return \"lfd%U1%X1 %0,%1\"; + case 5: + return \"stfd%U0%X0 %1,%0\"; + case 6: + case 7: + case 8: + return \"#\"; + } +}" + [(set_attr "type" "two,load,store,fp,fpload,fpstore,*,*,*") + (set_attr "length" "8,16,16,4,4,4,8,12,16")]) + +(define_insn "*movdd_softfloat32" + [(set (match_operand:DD 0 "nonimmediate_operand" "=r,r,m,r,r,r") + (match_operand:DD 1 "input_operand" "r,m,r,G,H,F"))] + "! TARGET_POWERPC64 && (TARGET_SOFT_FLOAT || !TARGET_FPRS) + && (gpc_reg_operand (operands[0], DDmode) + || gpc_reg_operand (operands[1], DDmode))" + "#" + [(set_attr "type" "two,load,store,*,*,*") + (set_attr "length" "8,8,8,8,12,16")]) + +; ld/std require word-aligned displacements -> 'Y' constraint. +; List Y->r and r->Y before r->r for reload. +(define_insn "*movdd_hardfloat64_mfpgpr" + [(set (match_operand:DD 0 "nonimmediate_operand" "=Y,r,!r,d,d,m,*c*l,!r,*h,!r,!r,!r,r,d") + (match_operand:DD 1 "input_operand" "r,Y,r,d,m,d,r,h,0,G,H,F,d,r"))] + "TARGET_POWERPC64 && TARGET_MFPGPR && TARGET_HARD_FLOAT && TARGET_FPRS + && (gpc_reg_operand (operands[0], DDmode) + || gpc_reg_operand (operands[1], DDmode))" + "@ + std%U0%X0 %1,%0 + ld%U1%X1 %0,%1 + mr %0,%1 + fmr %0,%1 + lfd%U1%X1 %0,%1 + stfd%U0%X0 %1,%0 + mt%0 %1 + mf%1 %0 + {cror 0,0,0|nop} + # + # + # + mftgpr %0,%1 + mffgpr %0,%1" + [(set_attr "type" "store,load,*,fp,fpload,fpstore,mtjmpr,mfjmpr,*,*,*,*,mftgpr,mffgpr") + (set_attr "length" "4,4,4,4,4,4,4,4,4,8,12,16,4,4")]) + +; ld/std require word-aligned displacements -> 'Y' constraint. +; List Y->r and r->Y before r->r for reload. 
+(define_insn "*movdd_hardfloat64" + [(set (match_operand:DD 0 "nonimmediate_operand" "=Y,r,!r,d,d,m,*c*l,!r,*h,!r,!r,!r") + (match_operand:DD 1 "input_operand" "r,Y,r,d,m,d,r,h,0,G,H,F"))] + "TARGET_POWERPC64 && !TARGET_MFPGPR && TARGET_HARD_FLOAT && TARGET_FPRS + && (gpc_reg_operand (operands[0], DDmode) + || gpc_reg_operand (operands[1], DDmode))" + "@ + std%U0%X0 %1,%0 + ld%U1%X1 %0,%1 + mr %0,%1 + fmr %0,%1 + lfd%U1%X1 %0,%1 + stfd%U0%X0 %1,%0 + mt%0 %1 + mf%1 %0 + {cror 0,0,0|nop} + # + # + #" + [(set_attr "type" "store,load,*,fp,fpload,fpstore,mtjmpr,mfjmpr,*,*,*,*") + (set_attr "length" "4,4,4,4,4,4,4,4,4,8,12,16")]) + +(define_insn "*movdd_softfloat64" + [(set (match_operand:DD 0 "nonimmediate_operand" "=r,Y,r,cl,r,r,r,r,*h") + (match_operand:DD 1 "input_operand" "Y,r,r,r,h,G,H,F,0"))] + "TARGET_POWERPC64 && (TARGET_SOFT_FLOAT || !TARGET_FPRS) + && (gpc_reg_operand (operands[0], DDmode) + || gpc_reg_operand (operands[1], DDmode))" + "@ + ld%U1%X1 %0,%1 + std%U0%X0 %1,%0 + mr %0,%1 + mt%0 %1 + mf%1 %0 + # + # + # + {cror 0,0,0|nop}" + [(set_attr "type" "load,store,*,mtjmpr,mfjmpr,*,*,*,*") + (set_attr "length" "4,4,4,4,4,8,12,16,4")]) + +(define_expand "negtd2" + [(set (match_operand:TD 0 "gpc_reg_operand" "") + (neg:TD (match_operand:TD 1 "gpc_reg_operand" "")))] + "TARGET_HARD_FLOAT && TARGET_FPRS" + "") + +(define_insn "*negtd2_fpr" + [(set (match_operand:TD 0 "gpc_reg_operand" "=d") + (neg:TD (match_operand:TD 1 "gpc_reg_operand" "d")))] + "TARGET_HARD_FLOAT && TARGET_FPRS" + "fneg %0,%1" + [(set_attr "type" "fp")]) + +(define_expand "abstd2" + [(set (match_operand:TD 0 "gpc_reg_operand" "") + (abs:TD (match_operand:TD 1 "gpc_reg_operand" "")))] + "TARGET_HARD_FLOAT && TARGET_FPRS" + "") + +(define_insn "*abstd2_fpr" + [(set (match_operand:TD 0 "gpc_reg_operand" "=d") + (abs:TD (match_operand:TD 1 "gpc_reg_operand" "d")))] + "TARGET_HARD_FLOAT && TARGET_FPRS" + "fabs %0,%1" + [(set_attr "type" "fp")]) + +(define_insn "*nabstd2_fpr" + [(set 
(match_operand:TD 0 "gpc_reg_operand" "=d") + (neg:TD (abs:TD (match_operand:TD 1 "gpc_reg_operand" "d"))))] + "TARGET_HARD_FLOAT && TARGET_FPRS" + "fnabs %0,%1" + [(set_attr "type" "fp")]) + +(define_expand "movtd" + [(set (match_operand:TD 0 "general_operand" "") + (match_operand:TD 1 "any_operand" ""))] + "TARGET_HARD_FLOAT && TARGET_FPRS" + "{ rs6000_emit_move (operands[0], operands[1], TDmode); DONE; }") + +; It's important to list the o->f and f->o moves before f->f because +; otherwise reload, given m->f, will try to pick f->f and reload it, +; which doesn't make progress. Likewise r->Y must be before r->r. +(define_insn_and_split "*movtd_internal" + [(set (match_operand:TD 0 "nonimmediate_operand" "=o,d,d,r,Y,r") + (match_operand:TD 1 "input_operand" "d,o,d,YGHF,r,r"))] + "TARGET_HARD_FLOAT && TARGET_FPRS + && (gpc_reg_operand (operands[0], TDmode) + || gpc_reg_operand (operands[1], TDmode))" + "#" + "&& reload_completed" + [(pc)] +{ rs6000_split_multireg_move (operands[0], operands[1]); DONE; } + [(set_attr "length" "8,8,8,20,20,16")]) + +;; Hardware support for decimal floating point operations. + +(define_insn "extendddtd2" + [(set (match_operand:TD 0 "gpc_reg_operand" "=d") + (float_extend:TD (match_operand:DD 1 "gpc_reg_operand" "d")))] + "TARGET_DFP" + "dctqpq %0,%1" + [(set_attr "type" "fp")]) + +;; The result of drdpq is an even/odd register pair with the converted +;; value in the even register and zero in the odd register. +;; FIXME: Avoid the register move by using a reload constraint to ensure +;; that the result is the first of the pair receiving the result of drdpq. 
+ +(define_insn "trunctddd2" + [(set (match_operand:DD 0 "gpc_reg_operand" "=d") + (float_truncate:DD (match_operand:TD 1 "gpc_reg_operand" "d"))) + (clobber (match_scratch:TD 2 "=d"))] + "TARGET_DFP" + "drdpq %2,%1\;fmr %0,%2" + [(set_attr "type" "fp")]) + +(define_insn "adddd3" + [(set (match_operand:DD 0 "gpc_reg_operand" "=d") + (plus:DD (match_operand:DD 1 "gpc_reg_operand" "%d") + (match_operand:DD 2 "gpc_reg_operand" "d")))] + "TARGET_DFP" + "dadd %0,%1,%2" + [(set_attr "type" "fp")]) + +(define_insn "addtd3" + [(set (match_operand:TD 0 "gpc_reg_operand" "=d") + (plus:TD (match_operand:TD 1 "gpc_reg_operand" "%d") + (match_operand:TD 2 "gpc_reg_operand" "d")))] + "TARGET_DFP" + "daddq %0,%1,%2" + [(set_attr "type" "fp")]) + +(define_insn "subdd3" + [(set (match_operand:DD 0 "gpc_reg_operand" "=d") + (minus:DD (match_operand:DD 1 "gpc_reg_operand" "d") + (match_operand:DD 2 "gpc_reg_operand" "d")))] + "TARGET_DFP" + "dsub %0,%1,%2" + [(set_attr "type" "fp")]) + +(define_insn "subtd3" + [(set (match_operand:TD 0 "gpc_reg_operand" "=d") + (minus:TD (match_operand:TD 1 "gpc_reg_operand" "d") + (match_operand:TD 2 "gpc_reg_operand" "d")))] + "TARGET_DFP" + "dsubq %0,%1,%2" + [(set_attr "type" "fp")]) + +(define_insn "muldd3" + [(set (match_operand:DD 0 "gpc_reg_operand" "=d") + (mult:DD (match_operand:DD 1 "gpc_reg_operand" "%d") + (match_operand:DD 2 "gpc_reg_operand" "d")))] + "TARGET_DFP" + "dmul %0,%1,%2" + [(set_attr "type" "fp")]) + +(define_insn "multd3" + [(set (match_operand:TD 0 "gpc_reg_operand" "=d") + (mult:TD (match_operand:TD 1 "gpc_reg_operand" "%d") + (match_operand:TD 2 "gpc_reg_operand" "d")))] + "TARGET_DFP" + "dmulq %0,%1,%2" + [(set_attr "type" "fp")]) + +(define_insn "divdd3" + [(set (match_operand:DD 0 "gpc_reg_operand" "=d") + (div:DD (match_operand:DD 1 "gpc_reg_operand" "d") + (match_operand:DD 2 "gpc_reg_operand" "d")))] + "TARGET_DFP" + "ddiv %0,%1,%2" + [(set_attr "type" "fp")]) + +(define_insn "divtd3" + [(set (match_operand:TD 0 
"gpc_reg_operand" "=d") + (div:TD (match_operand:TD 1 "gpc_reg_operand" "d") + (match_operand:TD 2 "gpc_reg_operand" "d")))] + "TARGET_DFP" + "ddivq %0,%1,%2" + [(set_attr "type" "fp")]) + +(define_insn "*cmpdd_internal1" + [(set (match_operand:CCFP 0 "cc_reg_operand" "=y") + (compare:CCFP (match_operand:DD 1 "gpc_reg_operand" "d") + (match_operand:DD 2 "gpc_reg_operand" "d")))] + "TARGET_DFP" + "dcmpu %0,%1,%2" + [(set_attr "type" "fpcompare")]) + +(define_insn "*cmptd_internal1" + [(set (match_operand:CCFP 0 "cc_reg_operand" "=y") + (compare:CCFP (match_operand:TD 1 "gpc_reg_operand" "d") + (match_operand:TD 2 "gpc_reg_operand" "d")))] + "TARGET_DFP" + "dcmpuq %0,%1,%2" + [(set_attr "type" "fpcompare")]) + +(define_insn "floatditd2" + [(set (match_operand:TD 0 "gpc_reg_operand" "=d") + (float:TD (match_operand:DI 1 "gpc_reg_operand" "d")))] + "TARGET_DFP" + "dcffixq %0,%1" + [(set_attr "type" "fp")]) + +;; Convert a decimal64 to a decimal64 whose value is an integer. +;; This is the first stage of converting it to an integer type. + +(define_insn "ftruncdd2" + [(set (match_operand:DD 0 "gpc_reg_operand" "=d") + (fix:DD (match_operand:DD 1 "gpc_reg_operand" "d")))] + "TARGET_DFP" + "drintn. 0,%0,%1,1" + [(set_attr "type" "fp")]) + +;; Convert a decimal64 whose value is an integer to an actual integer. +;; This is the second stage of converting decimal float to integer type. + +(define_insn "fixdddi2" + [(set (match_operand:DI 0 "gpc_reg_operand" "=d") + (fix:DI (match_operand:DD 1 "gpc_reg_operand" "d")))] + "TARGET_DFP" + "dctfix %0,%1" + [(set_attr "type" "fp")]) + +;; Convert a decimal128 to a decimal128 whose value is an integer. +;; This is the first stage of converting it to an integer type. + +(define_insn "ftrunctd2" + [(set (match_operand:TD 0 "gpc_reg_operand" "=d") + (fix:TD (match_operand:TD 1 "gpc_reg_operand" "d")))] + "TARGET_DFP" + "drintnq. 
0,%0,%1,1" + [(set_attr "type" "fp")]) + +;; Convert a decimal128 whose value is an integer to an actual integer. +;; This is the second stage of converting decimal float to integer type. + +(define_insn "fixtddi2" + [(set (match_operand:DI 0 "gpc_reg_operand" "=d") + (fix:DI (match_operand:TD 1 "gpc_reg_operand" "d")))] + "TARGET_DFP" + "dctfixq %0,%1" + [(set_attr "type" "fp")]) diff --git a/gcc/config/rs6000/driver-rs6000.c b/gcc/config/rs6000/driver-rs6000.c new file mode 100644 index 000000000..11e76ea96 --- /dev/null +++ b/gcc/config/rs6000/driver-rs6000.c @@ -0,0 +1,547 @@ +/* Subroutines for the gcc driver. + Copyright (C) 2007, 2008, 2009 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include "tm.h" +#include + +#ifdef _AIX +# include +#endif + +#ifdef __linux__ +# include +#endif + +#if defined (__APPLE__) || (__FreeBSD__) +# include +# include +#endif + +const char *host_detect_local_cpu (int argc, const char **argv); + +#if GCC_VERSION >= 0 + +/* Returns parameters that describe L1_ASSOC associative cache of size + L1_SIZEKB with lines of size L1_LINE, and L2_SIZEKB. 
*/ + +static char * +describe_cache (unsigned l1_sizekb, unsigned l1_line, + unsigned l1_assoc ATTRIBUTE_UNUSED, unsigned l2_sizekb) +{ + char l1size[1000], line[1000], l2size[1000]; + + /* At the moment, gcc middle-end does not use the information about the + associativity of the cache. */ + + sprintf (l1size, "--param l1-cache-size=%u", l1_sizekb); + sprintf (line, "--param l1-cache-line-size=%u", l1_line); + sprintf (l2size, "--param l2-cache-size=%u", l2_sizekb); + + return concat (l1size, " ", line, " ", l2size, " ", NULL); +} + +#ifdef __APPLE__ + +/* Returns the description of caches on Darwin. */ + +static char * +detect_caches_darwin (void) +{ + unsigned l1_sizekb, l1_line, l1_assoc, l2_sizekb; + size_t len = 4; + static int l1_size_name[2] = { CTL_HW, HW_L1DCACHESIZE }; + static int l1_line_name[2] = { CTL_HW, HW_CACHELINE }; + static int l2_size_name[2] = { CTL_HW, HW_L2CACHESIZE }; + + sysctl (l1_size_name, 2, &l1_sizekb, &len, NULL, 0); + sysctl (l1_line_name, 2, &l1_line, &len, NULL, 0); + sysctl (l2_size_name, 2, &l2_sizekb, &len, NULL, 0); + l1_assoc = 0; + + return describe_cache (l1_sizekb / 1024, l1_line, l1_assoc, + l2_sizekb / 1024); +} + +static const char * +detect_processor_darwin (void) +{ + unsigned int proc; + size_t len = 4; + + sysctlbyname ("hw.cpusubtype", &proc, &len, NULL, 0); + + if (len > 0) + switch (proc) + { + case 1: + return "601"; + case 2: + return "602"; + case 3: + return "603"; + case 4: + case 5: + return "603e"; + case 6: + return "604"; + case 7: + return "604e"; + case 8: + return "620"; + case 9: + return "750"; + case 10: + return "7400"; + case 11: + return "7450"; + case 100: + return "970"; + default: + return "powerpc"; + } + + return "powerpc"; +} + +#endif /* __APPLE__ */ + +#ifdef __FreeBSD__ + +/* Returns the description of caches on FreeBSD PPC. 
*/ + +static char * +detect_caches_freebsd (void) +{ + unsigned l1_sizekb, l1_line, l1_assoc, l2_sizekb; + size_t len = 4; + + /* Currently, as of FreeBSD-7.0, there is only the cacheline_size + available via sysctl. */ + sysctlbyname ("machdep.cacheline_size", &l1_line, &len, NULL, 0); + + l1_sizekb = 32; + l1_assoc = 0; + l2_sizekb = 512; + + return describe_cache (l1_sizekb, l1_line, l1_assoc, l2_sizekb); +} + +/* Currently returns default powerpc. */ +static const char * +detect_processor_freebsd (void) +{ + return "powerpc"; +} + +#endif /* __FreeBSD__ */ + +#ifdef __linux__ + +/* Returns AT_PLATFORM if present, otherwise generic PowerPC. */ + +static const char * +elf_platform (void) +{ + int fd; + + fd = open ("/proc/self/auxv", O_RDONLY); + + if (fd != -1) + { + char buf[1024]; + ElfW(auxv_t) *av; + ssize_t n; + + n = read (fd, buf, sizeof (buf)); + close (fd); + + if (n > 0) + { + for (av = (ElfW(auxv_t) *) buf; av->a_type != AT_NULL; ++av) + switch (av->a_type) + { + case AT_PLATFORM: + return (const char *) av->a_un.a_val; + + default: + break; + } + } + } + return NULL; +} + +/* Returns AT_PLATFORM if present, otherwise generic 32. */ + +static int +elf_dcachebsize (void) +{ + int fd; + + fd = open ("/proc/self/auxv", O_RDONLY); + + if (fd != -1) + { + char buf[1024]; + ElfW(auxv_t) *av; + ssize_t n; + + n = read (fd, buf, sizeof (buf)); + close (fd); + + if (n > 0) + { + for (av = (ElfW(auxv_t) *) buf; av->a_type != AT_NULL; ++av) + switch (av->a_type) + { + case AT_DCACHEBSIZE: + return av->a_un.a_val; + + default: + break; + } + } + } + return 32; +} + +/* Returns the description of caches on Linux. 
*/ + +static char * +detect_caches_linux (void) +{ + unsigned l1_sizekb, l1_line, l1_assoc, l2_sizekb; + const char *platform; + + platform = elf_platform (); + + if (platform != NULL) + { + l1_line = 128; + + if (platform[5] == '6') + /* POWER6 and POWER6x */ + l1_sizekb = 64; + else + l1_sizekb = 32; + } + else + { + l1_line = elf_dcachebsize (); + l1_sizekb = 32; + } + + l1_assoc = 0; + l2_sizekb = 512; + + return describe_cache (l1_sizekb, l1_line, l1_assoc, l2_sizekb); +} + +static const char * +detect_processor_linux (void) +{ + const char *platform; + + platform = elf_platform (); + + if (platform != NULL) + return platform; + else + return "powerpc"; +} + +#endif /* __linux__ */ + +#ifdef _AIX +/* Returns the description of caches on AIX. */ + +static char * +detect_caches_aix (void) +{ + unsigned l1_sizekb, l1_line, l1_assoc, l2_sizekb; + + l1_sizekb = _system_configuration.dcache_size / 1024; + l1_line = _system_configuration.dcache_line; + l1_assoc = _system_configuration.dcache_asc; + l2_sizekb = _system_configuration.L2_cache_size / 1024; + + return describe_cache (l1_sizekb, l1_line, l1_assoc, l2_sizekb); +} + + +/* Returns the processor implementation on AIX. */ + +static const char * +detect_processor_aix (void) +{ + switch (_system_configuration.implementation) + { + case 0x0001: + return "rios1"; + + case 0x0002: + return "rsc"; + + case 0x0004: + return "rios2"; + + case 0x0008: + return "601"; + + case 0x0020: + return "603"; + + case 0x0010: + return "604"; + + case 0x0040: + return "620"; + + case 0x0080: + return "630"; + + case 0x0100: + case 0x0200: + case 0x0400: + return "rs64"; + + case 0x0800: + return "power4"; + + case 0x2000: + if (_system_configuration.version == 0x0F0000) + return "power5"; + else + return "power5+"; + + case 0x4000: + return "power6"; + + default: + return "powerpc"; + } +} +#endif /* _AIX */ + + +/* + * Array to map -mcpu=native names to the switches passed to the assembler. 
+ * This list mirrors the specs in ASM_CPU_SPEC, and any changes made here + * should be made there as well. + */ + +struct asm_name { + const char *cpu; + const char *asm_sw; +}; + +static const struct asm_name asm_names[] = { +#if defined (_AIX) + { "power3", "-m620" }, + { "power4", "-mpwr4" }, + { "power5", "-mpwr5" }, + { "power5+", "-mpwr5x" }, + { "power6", "-mpwr6" }, + { "power6x", "-mpwr6" }, + { "power7", "-mpwr7" }, + { "powerpc", "-mppc" }, + { "rs64a", "-mppc" }, + { "603", "-m603" }, + { "603e", "-m603" }, + { "604", "-m604" }, + { "604e", "-m604" }, + { "620", "-m620" }, + { "630", "-m620" }, + { "970", "-m970" }, + { "G5", "-m970" }, + { NULL, "\ +%{!maix64: \ +%{mpowerpc64: -mppc64} \ +%{maltivec: -m970} \ +%{!maltivec: %{!mpower64: %(asm_default)}}}" }, + +#else + { "common", "-mcom" }, + { "cell", "-mcell" }, + { "power", "-mpwr" }, + { "power2", "-mpwrx" }, + { "power3", "-mppc64" }, + { "power4", "-mpower4" }, + { "power5", "%(asm_cpu_power5)" }, + { "power5+", "%(asm_cpu_power5)" }, + { "power6", "%(asm_cpu_power6) -maltivec" }, + { "power6x", "%(asm_cpu_power6) -maltivec" }, + { "power7", "%(asm_cpu_power7)" }, + { "powerpc", "-mppc" }, + { "rios", "-mpwr" }, + { "rios1", "-mpwr" }, + { "rios2", "-mpwrx" }, + { "rsc", "-mpwr" }, + { "rsc1", "-mpwr" }, + { "rs64a", "-mppc64" }, + { "401", "-mppc" }, + { "403", "-m403" }, + { "405", "-m405" }, + { "405fp", "-m405" }, + { "440", "-m440" }, + { "440fp", "-m440" }, + { "464", "-m440" }, + { "464fp", "-m440" }, + { "505", "-mppc" }, + { "601", "-m601" }, + { "602", "-mppc" }, + { "603", "-mppc" }, + { "603e", "-mppc" }, + { "ec603e", "-mppc" }, + { "604", "-mppc" }, + { "604e", "-mppc" }, + { "620", "-mppc64" }, + { "630", "-mppc64" }, + { "740", "-mppc" }, + { "750", "-mppc" }, + { "G3", "-mppc" }, + { "7400", "-mppc -maltivec" }, + { "7450", "-mppc -maltivec" }, + { "G4", "-mppc -maltivec" }, + { "801", "-mppc" }, + { "821", "-mppc" }, + { "823", "-mppc" }, + { "860", "-mppc" }, + { "970", 
"-mpower4 -maltivec" }, + { "G5", "-mpower4 -maltivec" }, + { "8540", "-me500" }, + { "8548", "-me500" }, + { "e300c2", "-me300" }, + { "e300c3", "-me300" }, + { "e500mc", "-me500mc" }, + { NULL, "\ +%{mpower: %{!mpower2: -mpwr}} \ +%{mpower2: -mpwrx} \ +%{mpowerpc64*: -mppc64} \ +%{!mpowerpc64*: %{mpowerpc*: -mppc}} \ +%{mno-power: %{!mpowerpc*: -mcom}} \ +%{!mno-power: %{!mpower*: %(asm_default)}}" }, +#endif +}; + +/* This will be called by the spec parser in gcc.c when it sees + a %:local_cpu_detect(args) construct. Currently it will be called + with either "arch" or "tune" as argument depending on if -march=native + or -mtune=native is to be substituted. + + Additionally it will be called with "asm" to select the appropriate flags + for the assembler. + + It returns a string containing new command line parameters to be + put at the place of the above two options, depending on what CPU + this is executed. + + ARGC and ARGV are set depending on the actual arguments given + in the spec. */ +const char * +host_detect_local_cpu (int argc, const char **argv) +{ + const char *cpu = NULL; + const char *cache = ""; + const char *options = ""; + bool arch; + bool assembler; + size_t i; + + if (argc < 1) + return NULL; + + arch = strcmp (argv[0], "cpu") == 0; + assembler = (!arch && strcmp (argv[0], "asm") == 0); + if (!arch && !assembler && strcmp (argv[0], "tune")) + return NULL; + + if (! assembler) + { +#if defined (_AIX) + cache = detect_caches_aix (); +#elif defined (__APPLE__) + cache = detect_caches_darwin (); +#elif defined (__FreeBSD__) + cache = detect_caches_freebsd (); + /* FreeBSD PPC does not provide any cache information yet. */ + cache = ""; +#elif defined (__linux__) + cache = detect_caches_linux (); + /* PPC Linux does not provide any cache information yet. 
*/ + cache = ""; +#else + cache = ""; +#endif + } + +#if defined (_AIX) + cpu = detect_processor_aix (); +#elif defined (__APPLE__) + cpu = detect_processor_darwin (); +#elif defined (__FreeBSD__) + cpu = detect_processor_freebsd (); +#elif defined (__linux__) + cpu = detect_processor_linux (); +#else + cpu = "powerpc"; +#endif + + if (assembler) + { + for (i = 0; i < sizeof (asm_names) / sizeof (asm_names[0]); i++) + { + if (!asm_names[i].cpu || !strcmp (asm_names[i].cpu, cpu)) + return asm_names[i].asm_sw; + } + + return NULL; + } + + return concat (cache, "-m", argv[0], "=", cpu, " ", options, NULL); +} + +#else /* GCC_VERSION */ + +/* If we aren't compiling with GCC we just provide a minimal + default value. */ +const char * +host_detect_local_cpu (int argc, const char **argv) +{ + const char *cpu; + bool arch; + + if (argc < 1) + return NULL; + + arch = strcmp (argv[0], "cpu") == 0; + if (!arch && strcmp (argv[0], "tune")) + return NULL; + + if (arch) + cpu = "powerpc"; + + return concat ("-m", argv[0], "=", cpu, NULL); +} + +#endif /* GCC_VERSION */ + diff --git a/gcc/config/rs6000/e300c2c3.md b/gcc/config/rs6000/e300c2c3.md new file mode 100644 index 000000000..3462a209f --- /dev/null +++ b/gcc/config/rs6000/e300c2c3.md @@ -0,0 +1,189 @@ +;; Pipeline description for Motorola PowerPC e300c3 core. +;; Copyright (C) 2008, 2009 Free Software Foundation, Inc. +;; Contributed by Edmar Wienskoski (edmar@freescale.com) +;; +;; This file is part of GCC. +;; +;; GCC is free software; you can redistribute it and/or modify it +;; under the terms of the GNU General Public License as published +;; by the Free Software Foundation; either version 3, or (at your +;; option) any later version. +;; +;; GCC is distributed in the hope that it will be useful, but WITHOUT +;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +;; License for more details. 
+;; +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . + +(define_automaton "ppce300c3_most,ppce300c3_long,ppce300c3_retire") +(define_cpu_unit "ppce300c3_decode_0,ppce300c3_decode_1" "ppce300c3_most") + +;; We don't simulate general issue queue (GIC). If we have SU insn +;; and then SU1 insn, they can not be issued on the same cycle +;; (although SU1 insn and then SU insn can be issued) because the SU +;; insn will go to SU1 from GIC0 entry. Fortunately, the first cycle +;; multipass insn scheduling will find the situation and issue the SU1 +;; insn and then the SU insn. +(define_cpu_unit "ppce300c3_issue_0,ppce300c3_issue_1" "ppce300c3_most") + +;; We could describe completion buffers slots in combination with the +;; retirement units and the order of completion but the result +;; automaton would behave in the same way because we can not describe +;; real latency time with taking in order completion into account. +;; Actually we could define the real latency time by querying reserved +;; automaton units but the current scheduler uses latency time before +;; issuing insns and making any reservations. +;; +;; So our description is aimed to achieve a insn schedule in which the +;; insns would not wait in the completion buffer. +(define_cpu_unit "ppce300c3_retire_0,ppce300c3_retire_1" "ppce300c3_retire") + +;; Branch unit: +(define_cpu_unit "ppce300c3_bu" "ppce300c3_most") + +;; IU: +(define_cpu_unit "ppce300c3_iu0_stage0,ppce300c3_iu1_stage0" "ppce300c3_most") + +;; IU: This used to describe non-pipelined division. +(define_cpu_unit "ppce300c3_mu_div" "ppce300c3_long") + +;; SRU: +(define_cpu_unit "ppce300c3_sru_stage0" "ppce300c3_most") + +;; Here we simplified LSU unit description not describing the stages. 
+(define_cpu_unit "ppce300c3_lsu" "ppce300c3_most") + +;; FPU: +(define_cpu_unit "ppce300c3_fpu" "ppce300c3_most") + +;; The following units are used to make automata deterministic +(define_cpu_unit "present_ppce300c3_decode_0" "ppce300c3_most") +(define_cpu_unit "present_ppce300c3_issue_0" "ppce300c3_most") +(define_cpu_unit "present_ppce300c3_retire_0" "ppce300c3_retire") +(define_cpu_unit "present_ppce300c3_iu0_stage0" "ppce300c3_most") + +;; The following sets to make automata deterministic when option ndfa is used. +(presence_set "present_ppce300c3_decode_0" "ppce300c3_decode_0") +(presence_set "present_ppce300c3_issue_0" "ppce300c3_issue_0") +(presence_set "present_ppce300c3_retire_0" "ppce300c3_retire_0") +(presence_set "present_ppce300c3_iu0_stage0" "ppce300c3_iu0_stage0") + +;; Some useful abbreviations. +(define_reservation "ppce300c3_decode" + "ppce300c3_decode_0|ppce300c3_decode_1+present_ppce300c3_decode_0") +(define_reservation "ppce300c3_issue" + "ppce300c3_issue_0|ppce300c3_issue_1+present_ppce300c3_issue_0") +(define_reservation "ppce300c3_retire" + "ppce300c3_retire_0|ppce300c3_retire_1+present_ppce300c3_retire_0") +(define_reservation "ppce300c3_iu_stage0" + "ppce300c3_iu0_stage0|ppce300c3_iu1_stage0+present_ppce300c3_iu0_stage0") + +;; Compares can be executed either one of the IU or SRU +(define_insn_reservation "ppce300c3_cmp" 1 + (and (eq_attr "type" "cmp,compare,delayed_compare,fast_compare") + (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3"))) + "ppce300c3_decode,ppce300c3_issue+(ppce300c3_iu_stage0|ppce300c3_sru_stage0) \ + +ppce300c3_retire") + +;; Other one cycle IU insns +(define_insn_reservation "ppce300c3_iu" 1 + (and (eq_attr "type" "integer,insert_word,isel") + (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3"))) + "ppce300c3_decode,ppce300c3_issue+ppce300c3_iu_stage0+ppce300c3_retire") + +;; Branch. Actually this latency time is not used by the scheduler. 
+(define_insn_reservation "ppce300c3_branch" 1 + (and (eq_attr "type" "jmpreg,branch") + (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3"))) + "ppce300c3_decode,ppce300c3_bu,ppce300c3_retire") + +;; Multiply is non-pipelined but can be executed in any IU +(define_insn_reservation "ppce300c3_multiply" 2 + (and (eq_attr "type" "imul,imul2,imul3,imul_compare") + (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3"))) + "ppce300c3_decode,ppce300c3_issue+ppce300c3_iu_stage0, \ + ppce300c3_iu_stage0+ppce300c3_retire") + +;; Divide. We use the average latency time here. We omit reserving a +;; retire unit because of the result automata will be huge. +(define_insn_reservation "ppce300c3_divide" 20 + (and (eq_attr "type" "idiv") + (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3"))) + "ppce300c3_decode,ppce300c3_issue+ppce300c3_iu_stage0+ppce300c3_mu_div,\ + ppce300c3_mu_div*19") + +;; CR logical +(define_insn_reservation "ppce300c3_cr_logical" 1 + (and (eq_attr "type" "cr_logical,delayed_cr") + (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3"))) + "ppce300c3_decode,ppce300c3_issue+ppce300c3_sru_stage0+ppce300c3_retire") + +;; Mfcr +(define_insn_reservation "ppce300c3_mfcr" 1 + (and (eq_attr "type" "mfcr") + (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3"))) + "ppce300c3_decode,ppce300c3_issue+ppce300c3_sru_stage0+ppce300c3_retire") + +;; Mtcrf +(define_insn_reservation "ppce300c3_mtcrf" 1 + (and (eq_attr "type" "mtcr") + (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3"))) + "ppce300c3_decode,ppce300c3_issue+ppce300c3_sru_stage0+ppce300c3_retire") + +;; Mtjmpr +(define_insn_reservation "ppce300c3_mtjmpr" 1 + (and (eq_attr "type" "mtjmpr,mfjmpr") + (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3"))) + "ppce300c3_decode,ppce300c3_issue+ppce300c3_sru_stage0+ppce300c3_retire") + +;; Float point instructions +(define_insn_reservation "ppce300c3_fpcompare" 3 + (and (eq_attr "type" "fpcompare") + (eq_attr 
"cpu" "ppce300c3")) + "ppce300c3_decode,ppce300c3_issue+ppce300c3_fpu,nothing,ppce300c3_retire") + +(define_insn_reservation "ppce300c3_fp" 3 + (and (eq_attr "type" "fp") + (eq_attr "cpu" "ppce300c3")) + "ppce300c3_decode,ppce300c3_issue+ppce300c3_fpu,nothing,ppce300c3_retire") + +(define_insn_reservation "ppce300c3_dmul" 4 + (and (eq_attr "type" "dmul") + (eq_attr "cpu" "ppce300c3")) + "ppce300c3_decode,ppce300c3_issue+ppce300c3_fpu,ppce300c3_fpu,nothing,ppce300c3_retire") + +; Divides are not pipelined +(define_insn_reservation "ppce300c3_sdiv" 18 + (and (eq_attr "type" "sdiv") + (eq_attr "cpu" "ppce300c3")) + "ppce300c3_decode,ppce300c3_issue+ppce300c3_fpu,ppce300c3_fpu*17") + +(define_insn_reservation "ppce300c3_ddiv" 33 + (and (eq_attr "type" "ddiv") + (eq_attr "cpu" "ppce300c3")) + "ppce300c3_decode,ppce300c3_issue+ppce300c3_fpu,ppce300c3_fpu*32") + +;; Loads +(define_insn_reservation "ppce300c3_load" 2 + (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u") + (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3"))) + "ppce300c3_decode,ppce300c3_issue+ppce300c3_lsu,ppce300c3_retire") + +(define_insn_reservation "ppce300c3_fpload" 2 + (and (eq_attr "type" "fpload,fpload_ux,fpload_u") + (eq_attr "cpu" "ppce300c3")) + "ppce300c3_decode,ppce300c3_issue+ppce300c3_lsu,ppce300c3_retire") + +;; Stores. 
+(define_insn_reservation "ppce300c3_store" 2 + (and (eq_attr "type" "store,store_ux,store_u") + (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3"))) + "ppce300c3_decode,ppce300c3_issue+ppce300c3_lsu,ppce300c3_retire") + +(define_insn_reservation "ppce300c3_fpstore" 2 + (and (eq_attr "type" "fpstore,fpstore_ux,fpstore_u") + (eq_attr "cpu" "ppce300c3")) + "ppce300c3_decode,ppce300c3_issue+ppce300c3_lsu,ppce300c3_retire") diff --git a/gcc/config/rs6000/e500-double.h b/gcc/config/rs6000/e500-double.h new file mode 100644 index 000000000..5545a8c93 --- /dev/null +++ b/gcc/config/rs6000/e500-double.h @@ -0,0 +1,24 @@ +/* Target definitions for E500 with double precision FP. + Copyright (C) 2004, 2006, 2007 Free Software Foundation, Inc. + Contributed by Aldy Hernandez (aldyh@redhat.com). + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +#undef SUB3TARGET_OVERRIDE_OPTIONS +#define SUB3TARGET_OVERRIDE_OPTIONS \ + if (!rs6000_explicit_options.float_gprs) \ + rs6000_float_gprs = 2; diff --git a/gcc/config/rs6000/e500.h b/gcc/config/rs6000/e500.h new file mode 100644 index 000000000..807df0900 --- /dev/null +++ b/gcc/config/rs6000/e500.h @@ -0,0 +1,57 @@ +/* Enable E500 support. + Copyright (C) 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software + Foundation, Inc. + This file is part of GCC. 
+ + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +#undef TARGET_SPE_ABI +#undef TARGET_SPE +#undef TARGET_E500 +#undef TARGET_FPRS +#undef TARGET_E500_SINGLE +#undef TARGET_E500_DOUBLE +#undef CHECK_E500_OPTIONS + +#define TARGET_SPE_ABI rs6000_spe_abi +#define TARGET_SPE rs6000_spe +#define TARGET_E500 (rs6000_cpu == PROCESSOR_PPC8540) +#define TARGET_FPRS (rs6000_float_gprs == 0) +#define TARGET_E500_SINGLE (TARGET_HARD_FLOAT && rs6000_float_gprs == 1) +#define TARGET_E500_DOUBLE (TARGET_HARD_FLOAT && rs6000_float_gprs == 2) +#define CHECK_E500_OPTIONS \ + do { \ + if (TARGET_E500 || TARGET_SPE || TARGET_SPE_ABI \ + || TARGET_E500_SINGLE || TARGET_E500_DOUBLE) \ + { \ + if (TARGET_ALTIVEC) \ + error ("AltiVec and E500 instructions cannot coexist"); \ + if (TARGET_VSX) \ + error ("VSX and E500 instructions cannot coexist"); \ + if (TARGET_64BIT) \ + error ("64-bit E500 not supported"); \ + if (TARGET_HARD_FLOAT && TARGET_FPRS) \ + error ("E500 and FPRs not supported"); \ + } \ + } while (0) + +/* Override rs6000.h definition. */ +#undef HARD_REGNO_CALLER_SAVE_MODE +/* When setting up caller-save slots (MODE == VOIDmode) ensure we + allocate space for DFmode. Save gprs in the correct mode too. */ +#define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \ + (TARGET_E500_DOUBLE && ((MODE) == VOIDmode || (MODE) == DFmode) \ + ? 
DFmode \ + : choose_hard_reg_mode ((REGNO), (NREGS), false)) diff --git a/gcc/config/rs6000/e500crtres32gpr.asm b/gcc/config/rs6000/e500crtres32gpr.asm new file mode 100644 index 000000000..6fbff820b --- /dev/null +++ b/gcc/config/rs6000/e500crtres32gpr.asm @@ -0,0 +1,73 @@ +/* + * Special support for e500 eabi and SVR4 + * + * Copyright (C) 2008, 2009 Free Software Foundation, Inc. + * Written by Nathan Froyd + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * . + */ + + .section ".text" + #include "ppc-asm.h" + +#ifdef __SPE__ + +/* Routines for restoring 32-bit integer registers, called by the compiler. */ +/* "Bare" versions that simply return to their caller. 
*/ + +HIDDEN_FUNC(_rest32gpr_14) lwz 14,-72(11) +HIDDEN_FUNC(_rest32gpr_15) lwz 15,-68(11) +HIDDEN_FUNC(_rest32gpr_16) lwz 16,-64(11) +HIDDEN_FUNC(_rest32gpr_17) lwz 17,-60(11) +HIDDEN_FUNC(_rest32gpr_18) lwz 18,-56(11) +HIDDEN_FUNC(_rest32gpr_19) lwz 19,-52(11) +HIDDEN_FUNC(_rest32gpr_20) lwz 20,-48(11) +HIDDEN_FUNC(_rest32gpr_21) lwz 21,-44(11) +HIDDEN_FUNC(_rest32gpr_22) lwz 22,-40(11) +HIDDEN_FUNC(_rest32gpr_23) lwz 23,-36(11) +HIDDEN_FUNC(_rest32gpr_24) lwz 24,-32(11) +HIDDEN_FUNC(_rest32gpr_25) lwz 25,-28(11) +HIDDEN_FUNC(_rest32gpr_26) lwz 26,-24(11) +HIDDEN_FUNC(_rest32gpr_27) lwz 27,-20(11) +HIDDEN_FUNC(_rest32gpr_28) lwz 28,-16(11) +HIDDEN_FUNC(_rest32gpr_29) lwz 29,-12(11) +HIDDEN_FUNC(_rest32gpr_30) lwz 30,-8(11) +HIDDEN_FUNC(_rest32gpr_31) lwz 31,-4(11) + blr +FUNC_END(_rest32gpr_31) +FUNC_END(_rest32gpr_30) +FUNC_END(_rest32gpr_29) +FUNC_END(_rest32gpr_28) +FUNC_END(_rest32gpr_27) +FUNC_END(_rest32gpr_26) +FUNC_END(_rest32gpr_25) +FUNC_END(_rest32gpr_24) +FUNC_END(_rest32gpr_23) +FUNC_END(_rest32gpr_22) +FUNC_END(_rest32gpr_21) +FUNC_END(_rest32gpr_20) +FUNC_END(_rest32gpr_19) +FUNC_END(_rest32gpr_18) +FUNC_END(_rest32gpr_17) +FUNC_END(_rest32gpr_16) +FUNC_END(_rest32gpr_15) +FUNC_END(_rest32gpr_14) + +#endif diff --git a/gcc/config/rs6000/e500crtres64gpr.asm b/gcc/config/rs6000/e500crtres64gpr.asm new file mode 100644 index 000000000..5182e5539 --- /dev/null +++ b/gcc/config/rs6000/e500crtres64gpr.asm @@ -0,0 +1,73 @@ +/* + * Special support for e500 eabi and SVR4 + * + * Copyright (C) 2008, 2009 Free Software Foundation, Inc. + * Written by Nathan Froyd + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. 
+ * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * . + */ + + .section ".text" + #include "ppc-asm.h" + +#ifdef __SPE__ + +/* Routines for restoring 64-bit integer registers, called by the compiler. */ +/* "Bare" versions that return to their caller. */ + +HIDDEN_FUNC(_rest64gpr_14) evldd 14,0(11) +HIDDEN_FUNC(_rest64gpr_15) evldd 15,8(11) +HIDDEN_FUNC(_rest64gpr_16) evldd 16,16(11) +HIDDEN_FUNC(_rest64gpr_17) evldd 17,24(11) +HIDDEN_FUNC(_rest64gpr_18) evldd 18,32(11) +HIDDEN_FUNC(_rest64gpr_19) evldd 19,40(11) +HIDDEN_FUNC(_rest64gpr_20) evldd 20,48(11) +HIDDEN_FUNC(_rest64gpr_21) evldd 21,56(11) +HIDDEN_FUNC(_rest64gpr_22) evldd 22,64(11) +HIDDEN_FUNC(_rest64gpr_23) evldd 23,72(11) +HIDDEN_FUNC(_rest64gpr_24) evldd 24,80(11) +HIDDEN_FUNC(_rest64gpr_25) evldd 25,88(11) +HIDDEN_FUNC(_rest64gpr_26) evldd 26,96(11) +HIDDEN_FUNC(_rest64gpr_27) evldd 27,104(11) +HIDDEN_FUNC(_rest64gpr_28) evldd 28,112(11) +HIDDEN_FUNC(_rest64gpr_29) evldd 29,120(11) +HIDDEN_FUNC(_rest64gpr_30) evldd 30,128(11) +HIDDEN_FUNC(_rest64gpr_31) evldd 31,136(11) + blr +FUNC_END(_rest64gpr_31) +FUNC_END(_rest64gpr_30) +FUNC_END(_rest64gpr_29) +FUNC_END(_rest64gpr_28) +FUNC_END(_rest64gpr_27) +FUNC_END(_rest64gpr_26) +FUNC_END(_rest64gpr_25) +FUNC_END(_rest64gpr_24) +FUNC_END(_rest64gpr_23) +FUNC_END(_rest64gpr_22) +FUNC_END(_rest64gpr_21) +FUNC_END(_rest64gpr_20) 
+FUNC_END(_rest64gpr_19) +FUNC_END(_rest64gpr_18) +FUNC_END(_rest64gpr_17) +FUNC_END(_rest64gpr_16) +FUNC_END(_rest64gpr_15) +FUNC_END(_rest64gpr_14) + +#endif diff --git a/gcc/config/rs6000/e500crtres64gprctr.asm b/gcc/config/rs6000/e500crtres64gprctr.asm new file mode 100644 index 000000000..74309d6be --- /dev/null +++ b/gcc/config/rs6000/e500crtres64gprctr.asm @@ -0,0 +1,90 @@ +/* + * Special support for e500 eabi and SVR4 + * + * Copyright (C) 2008, 2009, 2010 Free Software Foundation, Inc. + * Written by Nathan Froyd + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * . + */ + + .section ".text" + #include "ppc-asm.h" + +#ifdef __SPE__ + +/* Routines for restoring 64-bit integer registers where the number of + registers to be restored is passed in CTR, called by the compiler. 
*/ + +HIDDEN_FUNC(_rest64gpr_ctr_14) evldd 14,0(11) + bdz _rest64gpr_ctr_done +HIDDEN_FUNC(_rest64gpr_ctr_15) evldd 15,8(11) + bdz _rest64gpr_ctr_done +HIDDEN_FUNC(_rest64gpr_ctr_16) evldd 16,16(11) + bdz _rest64gpr_ctr_done +HIDDEN_FUNC(_rest64gpr_ctr_17) evldd 17,24(11) + bdz _rest64gpr_ctr_done +HIDDEN_FUNC(_rest64gpr_ctr_18) evldd 18,32(11) + bdz _rest64gpr_ctr_done +HIDDEN_FUNC(_rest64gpr_ctr_19) evldd 19,40(11) + bdz _rest64gpr_ctr_done +HIDDEN_FUNC(_rest64gpr_ctr_20) evldd 20,48(11) + bdz _rest64gpr_ctr_done +HIDDEN_FUNC(_rest64gpr_ctr_21) evldd 21,56(11) + bdz _rest64gpr_ctr_done +HIDDEN_FUNC(_rest64gpr_ctr_22) evldd 22,64(11) + bdz _rest64gpr_ctr_done +HIDDEN_FUNC(_rest64gpr_ctr_23) evldd 23,72(11) + bdz _rest64gpr_ctr_done +HIDDEN_FUNC(_rest64gpr_ctr_24) evldd 24,80(11) + bdz _rest64gpr_ctr_done +HIDDEN_FUNC(_rest64gpr_ctr_25) evldd 25,88(11) + bdz _rest64gpr_ctr_done +HIDDEN_FUNC(_rest64gpr_ctr_26) evldd 26,96(11) + bdz _rest64gpr_ctr_done +HIDDEN_FUNC(_rest64gpr_ctr_27) evldd 27,104(11) + bdz _rest64gpr_ctr_done +HIDDEN_FUNC(_rest64gpr_ctr_28) evldd 28,112(11) + bdz _rest64gpr_ctr_done +HIDDEN_FUNC(_rest64gpr_ctr_29) evldd 29,120(11) + bdz _rest64gpr_ctr_done +HIDDEN_FUNC(_rest64gpr_ctr_30) evldd 30,128(11) + bdz _rest64gpr_ctr_done +HIDDEN_FUNC(_rest64gpr_ctr_31) evldd 31,136(11) +_rest64gpr_ctr_done: blr +FUNC_END(_rest64gpr_ctr_31) +FUNC_END(_rest64gpr_ctr_30) +FUNC_END(_rest64gpr_ctr_29) +FUNC_END(_rest64gpr_ctr_28) +FUNC_END(_rest64gpr_ctr_27) +FUNC_END(_rest64gpr_ctr_26) +FUNC_END(_rest64gpr_ctr_25) +FUNC_END(_rest64gpr_ctr_24) +FUNC_END(_rest64gpr_ctr_23) +FUNC_END(_rest64gpr_ctr_22) +FUNC_END(_rest64gpr_ctr_21) +FUNC_END(_rest64gpr_ctr_20) +FUNC_END(_rest64gpr_ctr_19) +FUNC_END(_rest64gpr_ctr_18) +FUNC_END(_rest64gpr_ctr_17) +FUNC_END(_rest64gpr_ctr_16) +FUNC_END(_rest64gpr_ctr_15) +FUNC_END(_rest64gpr_ctr_14) + +#endif diff --git a/gcc/config/rs6000/e500crtrest32gpr.asm b/gcc/config/rs6000/e500crtrest32gpr.asm new file mode 100644 index 
000000000..4e61010dc --- /dev/null +++ b/gcc/config/rs6000/e500crtrest32gpr.asm @@ -0,0 +1,75 @@ +/* + * Special support for e500 eabi and SVR4 + * + * Copyright (C) 2008, 2009 Free Software Foundation, Inc. + * Written by Nathan Froyd + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * . + */ + + .section ".text" + #include "ppc-asm.h" + +#ifdef __SPE__ + +/* Routines for restoring 32-bit integer registers, called by the compiler. */ +/* "Tail" versions that perform a tail call. 
*/ + +HIDDEN_FUNC(_rest32gpr_14_t) lwz 14,-72(11) +HIDDEN_FUNC(_rest32gpr_15_t) lwz 15,-68(11) +HIDDEN_FUNC(_rest32gpr_16_t) lwz 16,-64(11) +HIDDEN_FUNC(_rest32gpr_17_t) lwz 17,-60(11) +HIDDEN_FUNC(_rest32gpr_18_t) lwz 18,-56(11) +HIDDEN_FUNC(_rest32gpr_19_t) lwz 19,-52(11) +HIDDEN_FUNC(_rest32gpr_20_t) lwz 20,-48(11) +HIDDEN_FUNC(_rest32gpr_21_t) lwz 21,-44(11) +HIDDEN_FUNC(_rest32gpr_22_t) lwz 22,-40(11) +HIDDEN_FUNC(_rest32gpr_23_t) lwz 23,-36(11) +HIDDEN_FUNC(_rest32gpr_24_t) lwz 24,-32(11) +HIDDEN_FUNC(_rest32gpr_25_t) lwz 25,-28(11) +HIDDEN_FUNC(_rest32gpr_26_t) lwz 26,-24(11) +HIDDEN_FUNC(_rest32gpr_27_t) lwz 27,-20(11) +HIDDEN_FUNC(_rest32gpr_28_t) lwz 28,-16(11) +HIDDEN_FUNC(_rest32gpr_29_t) lwz 29,-12(11) +HIDDEN_FUNC(_rest32gpr_30_t) lwz 30,-8(11) +HIDDEN_FUNC(_rest32gpr_31_t) lwz 31,-4(11) + lwz 0,4(11) + mr 1,11 + blr +FUNC_END(_rest32gpr_31_t) +FUNC_END(_rest32gpr_30_t) +FUNC_END(_rest32gpr_29_t) +FUNC_END(_rest32gpr_28_t) +FUNC_END(_rest32gpr_27_t) +FUNC_END(_rest32gpr_26_t) +FUNC_END(_rest32gpr_25_t) +FUNC_END(_rest32gpr_24_t) +FUNC_END(_rest32gpr_23_t) +FUNC_END(_rest32gpr_22_t) +FUNC_END(_rest32gpr_21_t) +FUNC_END(_rest32gpr_20_t) +FUNC_END(_rest32gpr_19_t) +FUNC_END(_rest32gpr_18_t) +FUNC_END(_rest32gpr_17_t) +FUNC_END(_rest32gpr_16_t) +FUNC_END(_rest32gpr_15_t) +FUNC_END(_rest32gpr_14_t) + +#endif diff --git a/gcc/config/rs6000/e500crtrest64gpr.asm b/gcc/config/rs6000/e500crtrest64gpr.asm new file mode 100644 index 000000000..090786fdc --- /dev/null +++ b/gcc/config/rs6000/e500crtrest64gpr.asm @@ -0,0 +1,74 @@ +/* + * Special support for e500 eabi and SVR4 + * + * Copyright (C) 2008, 2009 Free Software Foundation, Inc. + * Written by Nathan Froyd + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. 
+ * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * . + */ + + .section ".text" + #include "ppc-asm.h" + +#ifdef __SPE__ + +/* "Tail" versions that perform a tail call. */ + +HIDDEN_FUNC(_rest64gpr_14_t) evldd 14,0(11) +HIDDEN_FUNC(_rest64gpr_15_t) evldd 15,8(11) +HIDDEN_FUNC(_rest64gpr_16_t) evldd 16,16(11) +HIDDEN_FUNC(_rest64gpr_17_t) evldd 17,24(11) +HIDDEN_FUNC(_rest64gpr_18_t) evldd 18,32(11) +HIDDEN_FUNC(_rest64gpr_19_t) evldd 19,40(11) +HIDDEN_FUNC(_rest64gpr_20_t) evldd 20,48(11) +HIDDEN_FUNC(_rest64gpr_21_t) evldd 21,56(11) +HIDDEN_FUNC(_rest64gpr_22_t) evldd 22,64(11) +HIDDEN_FUNC(_rest64gpr_23_t) evldd 23,72(11) +HIDDEN_FUNC(_rest64gpr_24_t) evldd 24,80(11) +HIDDEN_FUNC(_rest64gpr_25_t) evldd 25,88(11) +HIDDEN_FUNC(_rest64gpr_26_t) evldd 26,96(11) +HIDDEN_FUNC(_rest64gpr_27_t) evldd 27,104(11) +HIDDEN_FUNC(_rest64gpr_28_t) evldd 28,112(11) +HIDDEN_FUNC(_rest64gpr_29_t) evldd 29,120(11) +HIDDEN_FUNC(_rest64gpr_30_t) evldd 30,128(11) +HIDDEN_FUNC(_rest64gpr_31_t) lwz 0,148(11) + evldd 31,136(11) + addi 1,11,144 + blr +FUNC_END(_rest64gpr_31_t) +FUNC_END(_rest64gpr_30_t) +FUNC_END(_rest64gpr_29_t) +FUNC_END(_rest64gpr_28_t) +FUNC_END(_rest64gpr_27_t) +FUNC_END(_rest64gpr_26_t) +FUNC_END(_rest64gpr_25_t) +FUNC_END(_rest64gpr_24_t) +FUNC_END(_rest64gpr_23_t) +FUNC_END(_rest64gpr_22_t) +FUNC_END(_rest64gpr_21_t) +FUNC_END(_rest64gpr_20_t) 
+FUNC_END(_rest64gpr_19_t) +FUNC_END(_rest64gpr_18_t) +FUNC_END(_rest64gpr_17_t) +FUNC_END(_rest64gpr_16_t) +FUNC_END(_rest64gpr_15_t) +FUNC_END(_rest64gpr_14_t) + +#endif diff --git a/gcc/config/rs6000/e500crtresx32gpr.asm b/gcc/config/rs6000/e500crtresx32gpr.asm new file mode 100644 index 000000000..0b35245df --- /dev/null +++ b/gcc/config/rs6000/e500crtresx32gpr.asm @@ -0,0 +1,75 @@ +/* + * Special support for e500 eabi and SVR4 + * + * Copyright (C) 2008, 2009 Free Software Foundation, Inc. + * Written by Nathan Froyd + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * . + */ + .section ".text" + #include "ppc-asm.h" + +#ifdef __SPE__ + +/* Routines for restoring 32-bit integer registers, called by the compiler. */ +/* "Exit" versions that return to the caller's caller. 
*/ + +HIDDEN_FUNC(_rest32gpr_14_x) lwz 14,-72(11) +HIDDEN_FUNC(_rest32gpr_15_x) lwz 15,-68(11) +HIDDEN_FUNC(_rest32gpr_16_x) lwz 16,-64(11) +HIDDEN_FUNC(_rest32gpr_17_x) lwz 17,-60(11) +HIDDEN_FUNC(_rest32gpr_18_x) lwz 18,-56(11) +HIDDEN_FUNC(_rest32gpr_19_x) lwz 19,-52(11) +HIDDEN_FUNC(_rest32gpr_20_x) lwz 20,-48(11) +HIDDEN_FUNC(_rest32gpr_21_x) lwz 21,-44(11) +HIDDEN_FUNC(_rest32gpr_22_x) lwz 22,-40(11) +HIDDEN_FUNC(_rest32gpr_23_x) lwz 23,-36(11) +HIDDEN_FUNC(_rest32gpr_24_x) lwz 24,-32(11) +HIDDEN_FUNC(_rest32gpr_25_x) lwz 25,-28(11) +HIDDEN_FUNC(_rest32gpr_26_x) lwz 26,-24(11) +HIDDEN_FUNC(_rest32gpr_27_x) lwz 27,-20(11) +HIDDEN_FUNC(_rest32gpr_28_x) lwz 28,-16(11) +HIDDEN_FUNC(_rest32gpr_29_x) lwz 29,-12(11) +HIDDEN_FUNC(_rest32gpr_30_x) lwz 30,-8(11) +HIDDEN_FUNC(_rest32gpr_31_x) lwz 0,4(11) + lwz 31,-4(11) + mr 1,11 + mtlr 0 + blr +FUNC_END(_rest32gpr_31_x) +FUNC_END(_rest32gpr_30_x) +FUNC_END(_rest32gpr_29_x) +FUNC_END(_rest32gpr_28_x) +FUNC_END(_rest32gpr_27_x) +FUNC_END(_rest32gpr_26_x) +FUNC_END(_rest32gpr_25_x) +FUNC_END(_rest32gpr_24_x) +FUNC_END(_rest32gpr_23_x) +FUNC_END(_rest32gpr_22_x) +FUNC_END(_rest32gpr_21_x) +FUNC_END(_rest32gpr_20_x) +FUNC_END(_rest32gpr_19_x) +FUNC_END(_rest32gpr_18_x) +FUNC_END(_rest32gpr_17_x) +FUNC_END(_rest32gpr_16_x) +FUNC_END(_rest32gpr_15_x) +FUNC_END(_rest32gpr_14_x) + +#endif diff --git a/gcc/config/rs6000/e500crtresx64gpr.asm b/gcc/config/rs6000/e500crtresx64gpr.asm new file mode 100644 index 000000000..ce2a6cfa2 --- /dev/null +++ b/gcc/config/rs6000/e500crtresx64gpr.asm @@ -0,0 +1,75 @@ +/* + * Special support for e500 eabi and SVR4 + * + * Copyright (C) 2008, 2009 Free Software Foundation, Inc. + * Written by Nathan Froyd + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. 
+ * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * . + */ + + .section ".text" + #include "ppc-asm.h" + +#ifdef __SPE__ + +/* "Exit" versions that return to their caller's caller. */ + +HIDDEN_FUNC(_rest64gpr_14_x) evldd 14,0(11) +HIDDEN_FUNC(_rest64gpr_15_x) evldd 15,8(11) +HIDDEN_FUNC(_rest64gpr_16_x) evldd 16,16(11) +HIDDEN_FUNC(_rest64gpr_17_x) evldd 17,24(11) +HIDDEN_FUNC(_rest64gpr_18_x) evldd 18,32(11) +HIDDEN_FUNC(_rest64gpr_19_x) evldd 19,40(11) +HIDDEN_FUNC(_rest64gpr_20_x) evldd 20,48(11) +HIDDEN_FUNC(_rest64gpr_21_x) evldd 21,56(11) +HIDDEN_FUNC(_rest64gpr_22_x) evldd 22,64(11) +HIDDEN_FUNC(_rest64gpr_23_x) evldd 23,72(11) +HIDDEN_FUNC(_rest64gpr_24_x) evldd 24,80(11) +HIDDEN_FUNC(_rest64gpr_25_x) evldd 25,88(11) +HIDDEN_FUNC(_rest64gpr_26_x) evldd 26,96(11) +HIDDEN_FUNC(_rest64gpr_27_x) evldd 27,104(11) +HIDDEN_FUNC(_rest64gpr_28_x) evldd 28,112(11) +HIDDEN_FUNC(_rest64gpr_29_x) evldd 29,120(11) +HIDDEN_FUNC(_rest64gpr_30_x) evldd 30,128(11) +HIDDEN_FUNC(_rest64gpr_31_x) lwz 0,148(11) + evldd 31,136(11) + addi 1,11,144 + mtlr 0 + blr +FUNC_END(_rest64gpr_31_x) +FUNC_END(_rest64gpr_30_x) +FUNC_END(_rest64gpr_29_x) +FUNC_END(_rest64gpr_28_x) +FUNC_END(_rest64gpr_27_x) +FUNC_END(_rest64gpr_26_x) +FUNC_END(_rest64gpr_25_x) +FUNC_END(_rest64gpr_24_x) +FUNC_END(_rest64gpr_23_x) +FUNC_END(_rest64gpr_22_x) +FUNC_END(_rest64gpr_21_x) 
+FUNC_END(_rest64gpr_20_x) +FUNC_END(_rest64gpr_19_x) +FUNC_END(_rest64gpr_18_x) +FUNC_END(_rest64gpr_17_x) +FUNC_END(_rest64gpr_16_x) +FUNC_END(_rest64gpr_15_x) +FUNC_END(_rest64gpr_14_x) + +#endif diff --git a/gcc/config/rs6000/e500crtsav32gpr.asm b/gcc/config/rs6000/e500crtsav32gpr.asm new file mode 100644 index 000000000..c89103050 --- /dev/null +++ b/gcc/config/rs6000/e500crtsav32gpr.asm @@ -0,0 +1,73 @@ +/* + * Special support for e500 eabi and SVR4 + * + * Copyright (C) 2008, 2009 Free Software Foundation, Inc. + * Written by Nathan Froyd + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * . + */ + + .section ".text" + #include "ppc-asm.h" + +#ifdef __SPE__ + +/* Routines for saving 32-bit integer registers, called by the compiler. */ +/* "Bare" versions that simply return to their caller. 
*/ + +HIDDEN_FUNC(_save32gpr_14) stw 14,-72(11) +HIDDEN_FUNC(_save32gpr_15) stw 15,-68(11) +HIDDEN_FUNC(_save32gpr_16) stw 16,-64(11) +HIDDEN_FUNC(_save32gpr_17) stw 17,-60(11) +HIDDEN_FUNC(_save32gpr_18) stw 18,-56(11) +HIDDEN_FUNC(_save32gpr_19) stw 19,-52(11) +HIDDEN_FUNC(_save32gpr_20) stw 20,-48(11) +HIDDEN_FUNC(_save32gpr_21) stw 21,-44(11) +HIDDEN_FUNC(_save32gpr_22) stw 22,-40(11) +HIDDEN_FUNC(_save32gpr_23) stw 23,-36(11) +HIDDEN_FUNC(_save32gpr_24) stw 24,-32(11) +HIDDEN_FUNC(_save32gpr_25) stw 25,-28(11) +HIDDEN_FUNC(_save32gpr_26) stw 26,-24(11) +HIDDEN_FUNC(_save32gpr_27) stw 27,-20(11) +HIDDEN_FUNC(_save32gpr_28) stw 28,-16(11) +HIDDEN_FUNC(_save32gpr_29) stw 29,-12(11) +HIDDEN_FUNC(_save32gpr_30) stw 30,-8(11) +HIDDEN_FUNC(_save32gpr_31) stw 31,-4(11) + blr +FUNC_END(_save32gpr_31) +FUNC_END(_save32gpr_30) +FUNC_END(_save32gpr_29) +FUNC_END(_save32gpr_28) +FUNC_END(_save32gpr_27) +FUNC_END(_save32gpr_26) +FUNC_END(_save32gpr_25) +FUNC_END(_save32gpr_24) +FUNC_END(_save32gpr_23) +FUNC_END(_save32gpr_22) +FUNC_END(_save32gpr_21) +FUNC_END(_save32gpr_20) +FUNC_END(_save32gpr_19) +FUNC_END(_save32gpr_18) +FUNC_END(_save32gpr_17) +FUNC_END(_save32gpr_16) +FUNC_END(_save32gpr_15) +FUNC_END(_save32gpr_14) + +#endif diff --git a/gcc/config/rs6000/e500crtsav64gpr.asm b/gcc/config/rs6000/e500crtsav64gpr.asm new file mode 100644 index 000000000..2a5d3e475 --- /dev/null +++ b/gcc/config/rs6000/e500crtsav64gpr.asm @@ -0,0 +1,72 @@ +/* + * Special support for e500 eabi and SVR4 + * + * Copyright (C) 2008, 2009 Free Software Foundation, Inc. + * Written by Nathan Froyd + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. 
+ * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * . + */ + + .section ".text" + #include "ppc-asm.h" + +#ifdef __SPE__ + +/* Routines for saving 64-bit integer registers, called by the compiler. */ + +HIDDEN_FUNC(_save64gpr_14) evstdd 14,0(11) +HIDDEN_FUNC(_save64gpr_15) evstdd 15,8(11) +HIDDEN_FUNC(_save64gpr_16) evstdd 16,16(11) +HIDDEN_FUNC(_save64gpr_17) evstdd 17,24(11) +HIDDEN_FUNC(_save64gpr_18) evstdd 18,32(11) +HIDDEN_FUNC(_save64gpr_19) evstdd 19,40(11) +HIDDEN_FUNC(_save64gpr_20) evstdd 20,48(11) +HIDDEN_FUNC(_save64gpr_21) evstdd 21,56(11) +HIDDEN_FUNC(_save64gpr_22) evstdd 22,64(11) +HIDDEN_FUNC(_save64gpr_23) evstdd 23,72(11) +HIDDEN_FUNC(_save64gpr_24) evstdd 24,80(11) +HIDDEN_FUNC(_save64gpr_25) evstdd 25,88(11) +HIDDEN_FUNC(_save64gpr_26) evstdd 26,96(11) +HIDDEN_FUNC(_save64gpr_27) evstdd 27,104(11) +HIDDEN_FUNC(_save64gpr_28) evstdd 28,112(11) +HIDDEN_FUNC(_save64gpr_29) evstdd 29,120(11) +HIDDEN_FUNC(_save64gpr_30) evstdd 30,128(11) +HIDDEN_FUNC(_save64gpr_31) evstdd 31,136(11) + blr +FUNC_END(_save64gpr_31) +FUNC_END(_save64gpr_30) +FUNC_END(_save64gpr_29) +FUNC_END(_save64gpr_28) +FUNC_END(_save64gpr_27) +FUNC_END(_save64gpr_26) +FUNC_END(_save64gpr_25) +FUNC_END(_save64gpr_24) +FUNC_END(_save64gpr_23) +FUNC_END(_save64gpr_22) +FUNC_END(_save64gpr_21) +FUNC_END(_save64gpr_20) +FUNC_END(_save64gpr_19) +FUNC_END(_save64gpr_18) 
+FUNC_END(_save64gpr_17) +FUNC_END(_save64gpr_16) +FUNC_END(_save64gpr_15) +FUNC_END(_save64gpr_14) + +#endif diff --git a/gcc/config/rs6000/e500crtsav64gprctr.asm b/gcc/config/rs6000/e500crtsav64gprctr.asm new file mode 100644 index 000000000..dd0bdf3c8 --- /dev/null +++ b/gcc/config/rs6000/e500crtsav64gprctr.asm @@ -0,0 +1,91 @@ +/* + * Special support for e500 eabi and SVR4 + * + * Copyright (C) 2008, 2009, 2010 Free Software Foundation, Inc. + * Written by Nathan Froyd + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * . + */ + + .section ".text" + #include "ppc-asm.h" + +#ifdef __SPE__ + +/* Routines for saving 64-bit integer registers where the number of + registers to be saved is passed in CTR, called by the compiler. */ +/* "Bare" versions that return to their caller. 
*/ + +HIDDEN_FUNC(_save64gpr_ctr_14) evstdd 14,0(11) + bdz _save64gpr_ctr_done +HIDDEN_FUNC(_save64gpr_ctr_15) evstdd 15,8(11) + bdz _save64gpr_ctr_done +HIDDEN_FUNC(_save64gpr_ctr_16) evstdd 16,16(11) + bdz _save64gpr_ctr_done +HIDDEN_FUNC(_save64gpr_ctr_17) evstdd 17,24(11) + bdz _save64gpr_ctr_done +HIDDEN_FUNC(_save64gpr_ctr_18) evstdd 18,32(11) + bdz _save64gpr_ctr_done +HIDDEN_FUNC(_save64gpr_ctr_19) evstdd 19,40(11) + bdz _save64gpr_ctr_done +HIDDEN_FUNC(_save64gpr_ctr_20) evstdd 20,48(11) + bdz _save64gpr_ctr_done +HIDDEN_FUNC(_save64gpr_ctr_21) evstdd 21,56(11) + bdz _save64gpr_ctr_done +HIDDEN_FUNC(_save64gpr_ctr_22) evstdd 22,64(11) + bdz _save64gpr_ctr_done +HIDDEN_FUNC(_save64gpr_ctr_23) evstdd 23,72(11) + bdz _save64gpr_ctr_done +HIDDEN_FUNC(_save64gpr_ctr_24) evstdd 24,80(11) + bdz _save64gpr_ctr_done +HIDDEN_FUNC(_save64gpr_ctr_25) evstdd 25,88(11) + bdz _save64gpr_ctr_done +HIDDEN_FUNC(_save64gpr_ctr_26) evstdd 26,96(11) + bdz _save64gpr_ctr_done +HIDDEN_FUNC(_save64gpr_ctr_27) evstdd 27,104(11) + bdz _save64gpr_ctr_done +HIDDEN_FUNC(_save64gpr_ctr_28) evstdd 28,112(11) + bdz _save64gpr_ctr_done +HIDDEN_FUNC(_save64gpr_ctr_29) evstdd 29,120(11) + bdz _save64gpr_ctr_done +HIDDEN_FUNC(_save64gpr_ctr_30) evstdd 30,128(11) + bdz _save64gpr_ctr_done +HIDDEN_FUNC(_save64gpr_ctr_31) evstdd 31,136(11) +_save64gpr_ctr_done: blr +FUNC_END(_save64gpr_ctr_31) +FUNC_END(_save64gpr_ctr_30) +FUNC_END(_save64gpr_ctr_29) +FUNC_END(_save64gpr_ctr_28) +FUNC_END(_save64gpr_ctr_27) +FUNC_END(_save64gpr_ctr_26) +FUNC_END(_save64gpr_ctr_25) +FUNC_END(_save64gpr_ctr_24) +FUNC_END(_save64gpr_ctr_23) +FUNC_END(_save64gpr_ctr_22) +FUNC_END(_save64gpr_ctr_21) +FUNC_END(_save64gpr_ctr_20) +FUNC_END(_save64gpr_ctr_19) +FUNC_END(_save64gpr_ctr_18) +FUNC_END(_save64gpr_ctr_17) +FUNC_END(_save64gpr_ctr_16) +FUNC_END(_save64gpr_ctr_15) +FUNC_END(_save64gpr_ctr_14) + +#endif diff --git a/gcc/config/rs6000/e500crtsavg32gpr.asm b/gcc/config/rs6000/e500crtsavg32gpr.asm new file mode 
100644 index 000000000..d14088e0d --- /dev/null +++ b/gcc/config/rs6000/e500crtsavg32gpr.asm @@ -0,0 +1,73 @@ +/* + * Special support for e500 eabi and SVR4 + * + * Copyright (C) 2008, 2009 Free Software Foundation, Inc. + * Written by Nathan Froyd + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * . + */ + + .section ".text" + #include "ppc-asm.h" + +#ifdef __SPE__ + +/* Routines for saving 32-bit integer registers, called by the compiler. */ +/* "GOT" versions that load the address of the GOT into lr before returning. 
*/ + +HIDDEN_FUNC(_save32gpr_14_g) stw 14,-72(11) +HIDDEN_FUNC(_save32gpr_15_g) stw 15,-68(11) +HIDDEN_FUNC(_save32gpr_16_g) stw 16,-64(11) +HIDDEN_FUNC(_save32gpr_17_g) stw 17,-60(11) +HIDDEN_FUNC(_save32gpr_18_g) stw 18,-56(11) +HIDDEN_FUNC(_save32gpr_19_g) stw 19,-52(11) +HIDDEN_FUNC(_save32gpr_20_g) stw 20,-48(11) +HIDDEN_FUNC(_save32gpr_21_g) stw 21,-44(11) +HIDDEN_FUNC(_save32gpr_22_g) stw 22,-40(11) +HIDDEN_FUNC(_save32gpr_23_g) stw 23,-36(11) +HIDDEN_FUNC(_save32gpr_24_g) stw 24,-32(11) +HIDDEN_FUNC(_save32gpr_25_g) stw 25,-28(11) +HIDDEN_FUNC(_save32gpr_26_g) stw 26,-24(11) +HIDDEN_FUNC(_save32gpr_27_g) stw 27,-20(11) +HIDDEN_FUNC(_save32gpr_28_g) stw 28,-16(11) +HIDDEN_FUNC(_save32gpr_29_g) stw 29,-12(11) +HIDDEN_FUNC(_save32gpr_30_g) stw 30,-8(11) +HIDDEN_FUNC(_save32gpr_31_g) stw 31,-4(11) + b _GLOBAL_OFFSET_TABLE_-4 +FUNC_END(_save32gpr_31_g) +FUNC_END(_save32gpr_30_g) +FUNC_END(_save32gpr_29_g) +FUNC_END(_save32gpr_28_g) +FUNC_END(_save32gpr_27_g) +FUNC_END(_save32gpr_26_g) +FUNC_END(_save32gpr_25_g) +FUNC_END(_save32gpr_24_g) +FUNC_END(_save32gpr_23_g) +FUNC_END(_save32gpr_22_g) +FUNC_END(_save32gpr_21_g) +FUNC_END(_save32gpr_20_g) +FUNC_END(_save32gpr_19_g) +FUNC_END(_save32gpr_18_g) +FUNC_END(_save32gpr_17_g) +FUNC_END(_save32gpr_16_g) +FUNC_END(_save32gpr_15_g) +FUNC_END(_save32gpr_14_g) + +#endif diff --git a/gcc/config/rs6000/e500crtsavg64gpr.asm b/gcc/config/rs6000/e500crtsavg64gpr.asm new file mode 100644 index 000000000..cbad75bc0 --- /dev/null +++ b/gcc/config/rs6000/e500crtsavg64gpr.asm @@ -0,0 +1,73 @@ +/* + * Special support for e500 eabi and SVR4 + * + * Copyright (C) 2008, 2009 Free Software Foundation, Inc. + * Written by Nathan Froyd + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. 
+ * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * . + */ + + .section ".text" + #include "ppc-asm.h" + +#ifdef __SPE__ + +/* Routines for saving 64-bit integer registers, called by the compiler. */ +/* "GOT" versions that load the address of the GOT into lr before returning. */ + +HIDDEN_FUNC(_save64gpr_14_g) evstdd 14,0(11) +HIDDEN_FUNC(_save64gpr_15_g) evstdd 15,8(11) +HIDDEN_FUNC(_save64gpr_16_g) evstdd 16,16(11) +HIDDEN_FUNC(_save64gpr_17_g) evstdd 17,24(11) +HIDDEN_FUNC(_save64gpr_18_g) evstdd 18,32(11) +HIDDEN_FUNC(_save64gpr_19_g) evstdd 19,40(11) +HIDDEN_FUNC(_save64gpr_20_g) evstdd 20,48(11) +HIDDEN_FUNC(_save64gpr_21_g) evstdd 21,56(11) +HIDDEN_FUNC(_save64gpr_22_g) evstdd 22,64(11) +HIDDEN_FUNC(_save64gpr_23_g) evstdd 23,72(11) +HIDDEN_FUNC(_save64gpr_24_g) evstdd 24,80(11) +HIDDEN_FUNC(_save64gpr_25_g) evstdd 25,88(11) +HIDDEN_FUNC(_save64gpr_26_g) evstdd 26,96(11) +HIDDEN_FUNC(_save64gpr_27_g) evstdd 27,104(11) +HIDDEN_FUNC(_save64gpr_28_g) evstdd 28,112(11) +HIDDEN_FUNC(_save64gpr_29_g) evstdd 29,120(11) +HIDDEN_FUNC(_save64gpr_30_g) evstdd 30,128(11) +HIDDEN_FUNC(_save64gpr_31_g) evstdd 31,136(11) + b _GLOBAL_OFFSET_TABLE_-4 +FUNC_END(_save64gpr_31_g) +FUNC_END(_save64gpr_30_g) +FUNC_END(_save64gpr_29_g) +FUNC_END(_save64gpr_28_g) +FUNC_END(_save64gpr_27_g) +FUNC_END(_save64gpr_26_g) +FUNC_END(_save64gpr_25_g) +FUNC_END(_save64gpr_24_g) 
+FUNC_END(_save64gpr_23_g) +FUNC_END(_save64gpr_22_g) +FUNC_END(_save64gpr_21_g) +FUNC_END(_save64gpr_20_g) +FUNC_END(_save64gpr_19_g) +FUNC_END(_save64gpr_18_g) +FUNC_END(_save64gpr_17_g) +FUNC_END(_save64gpr_16_g) +FUNC_END(_save64gpr_15_g) +FUNC_END(_save64gpr_14_g) + +#endif diff --git a/gcc/config/rs6000/e500crtsavg64gprctr.asm b/gcc/config/rs6000/e500crtsavg64gprctr.asm new file mode 100644 index 000000000..238df4e83 --- /dev/null +++ b/gcc/config/rs6000/e500crtsavg64gprctr.asm @@ -0,0 +1,90 @@ +/* + * Special support for e500 eabi and SVR4 + * + * Copyright (C) 2008, 2009, 2010 Free Software Foundation, Inc. + * Written by Nathan Froyd + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * . + */ + + .section ".text" + #include "ppc-asm.h" + +#ifdef __SPE__ + +/* Routines for saving 64-bit integer registers, called by the compiler. */ +/* "GOT" versions that load the address of the GOT into lr before returning. 
*/ + +HIDDEN_FUNC(_save64gpr_ctr_14_g) evstdd 14,0(11) + bdz _save64gpr_ctr_g_done +HIDDEN_FUNC(_save64gpr_ctr_15_g) evstdd 15,8(11) + bdz _save64gpr_ctr_g_done +HIDDEN_FUNC(_save64gpr_ctr_16_g) evstdd 16,16(11) + bdz _save64gpr_ctr_g_done +HIDDEN_FUNC(_save64gpr_ctr_17_g) evstdd 17,24(11) + bdz _save64gpr_ctr_g_done +HIDDEN_FUNC(_save64gpr_ctr_18_g) evstdd 18,32(11) + bdz _save64gpr_ctr_g_done +HIDDEN_FUNC(_save64gpr_ctr_19_g) evstdd 19,40(11) + bdz _save64gpr_ctr_g_done +HIDDEN_FUNC(_save64gpr_ctr_20_g) evstdd 20,48(11) + bdz _save64gpr_ctr_g_done +HIDDEN_FUNC(_save64gpr_ctr_21_g) evstdd 21,56(11) + bdz _save64gpr_ctr_g_done +HIDDEN_FUNC(_save64gpr_ctr_22_g) evstdd 22,64(11) + bdz _save64gpr_ctr_g_done +HIDDEN_FUNC(_save64gpr_ctr_23_g) evstdd 23,72(11) + bdz _save64gpr_ctr_g_done +HIDDEN_FUNC(_save64gpr_ctr_24_g) evstdd 24,80(11) + bdz _save64gpr_ctr_g_done +HIDDEN_FUNC(_save64gpr_ctr_25_g) evstdd 25,88(11) + bdz _save64gpr_ctr_g_done +HIDDEN_FUNC(_save64gpr_ctr_26_g) evstdd 26,96(11) + bdz _save64gpr_ctr_g_done +HIDDEN_FUNC(_save64gpr_ctr_27_g) evstdd 27,104(11) + bdz _save64gpr_ctr_g_done +HIDDEN_FUNC(_save64gpr_ctr_28_g) evstdd 28,112(11) + bdz _save64gpr_ctr_g_done +HIDDEN_FUNC(_save64gpr_ctr_29_g) evstdd 29,120(11) + bdz _save64gpr_ctr_g_done +HIDDEN_FUNC(_save64gpr_ctr_30_g) evstdd 30,128(11) + bdz _save64gpr_ctr_g_done +HIDDEN_FUNC(_save64gpr_ctr_31_g) evstdd 31,136(11) +_save64gpr_ctr_g_done: b _GLOBAL_OFFSET_TABLE_-4 +FUNC_END(_save64gpr_ctr_31_g) +FUNC_END(_save64gpr_ctr_30_g) +FUNC_END(_save64gpr_ctr_29_g) +FUNC_END(_save64gpr_ctr_28_g) +FUNC_END(_save64gpr_ctr_27_g) +FUNC_END(_save64gpr_ctr_26_g) +FUNC_END(_save64gpr_ctr_25_g) +FUNC_END(_save64gpr_ctr_24_g) +FUNC_END(_save64gpr_ctr_23_g) +FUNC_END(_save64gpr_ctr_22_g) +FUNC_END(_save64gpr_ctr_21_g) +FUNC_END(_save64gpr_ctr_20_g) +FUNC_END(_save64gpr_ctr_19_g) +FUNC_END(_save64gpr_ctr_18_g) +FUNC_END(_save64gpr_ctr_17_g) +FUNC_END(_save64gpr_ctr_16_g) +FUNC_END(_save64gpr_ctr_15_g) 
+FUNC_END(_save64gpr_ctr_14_g) + +#endif diff --git a/gcc/config/rs6000/e500mc.md b/gcc/config/rs6000/e500mc.md new file mode 100644 index 000000000..99a4b80ec --- /dev/null +++ b/gcc/config/rs6000/e500mc.md @@ -0,0 +1,200 @@ +;; Pipeline description for Motorola PowerPC e500mc core. +;; Copyright (C) 2008 Free Software Foundation, Inc. +;; Contributed by Edmar Wienskoski (edmar@freescale.com) +;; +;; This file is part of GCC. +;; +;; GCC is free software; you can redistribute it and/or modify it +;; under the terms of the GNU General Public License as published +;; by the Free Software Foundation; either version 3, or (at your +;; option) any later version. +;; +;; GCC is distributed in the hope that it will be useful, but WITHOUT +;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +;; License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . +;; +;; e500mc 32-bit SU(2), LSU, FPU, BPU +;; Max issue 3 insns/clock cycle (includes 1 branch) +;; FP is half clocked, timings of other instructions are as in the e500v2. + +(define_automaton "e500mc_most,e500mc_long,e500mc_retire") +(define_cpu_unit "e500mc_decode_0,e500mc_decode_1" "e500mc_most") +(define_cpu_unit "e500mc_issue_0,e500mc_issue_1" "e500mc_most") +(define_cpu_unit "e500mc_retire_0,e500mc_retire_1" "e500mc_retire") + +;; SU. +(define_cpu_unit "e500mc_su0_stage0,e500mc_su1_stage0" "e500mc_most") + +;; MU. +(define_cpu_unit "e500mc_mu_stage0,e500mc_mu_stage1" "e500mc_most") +(define_cpu_unit "e500mc_mu_stage2,e500mc_mu_stage3" "e500mc_most") + +;; Non-pipelined division. +(define_cpu_unit "e500mc_mu_div" "e500mc_long") + +;; LSU. +(define_cpu_unit "e500mc_lsu" "e500mc_most") + +;; FPU. +(define_cpu_unit "e500mc_fpu" "e500mc_most") + +;; Branch unit. 
+(define_cpu_unit "e500mc_bu" "e500mc_most") + +;; The following units are used to make the automata deterministic. +(define_cpu_unit "present_e500mc_decode_0" "e500mc_most") +(define_cpu_unit "present_e500mc_issue_0" "e500mc_most") +(define_cpu_unit "present_e500mc_retire_0" "e500mc_retire") +(define_cpu_unit "present_e500mc_su0_stage0" "e500mc_most") + +;; The following sets to make automata deterministic when option ndfa is used. +(presence_set "present_e500mc_decode_0" "e500mc_decode_0") +(presence_set "present_e500mc_issue_0" "e500mc_issue_0") +(presence_set "present_e500mc_retire_0" "e500mc_retire_0") +(presence_set "present_e500mc_su0_stage0" "e500mc_su0_stage0") + +;; Some useful abbreviations. +(define_reservation "e500mc_decode" + "e500mc_decode_0|e500mc_decode_1+present_e500mc_decode_0") +(define_reservation "e500mc_issue" + "e500mc_issue_0|e500mc_issue_1+present_e500mc_issue_0") +(define_reservation "e500mc_retire" + "e500mc_retire_0|e500mc_retire_1+present_e500mc_retire_0") +(define_reservation "e500mc_su_stage0" + "e500mc_su0_stage0|e500mc_su1_stage0+present_e500mc_su0_stage0") + +;; Simple SU insns. +(define_insn_reservation "e500mc_su" 1 + (and (eq_attr "type" "integer,insert_word,insert_dword,cmp,compare,\ + delayed_compare,var_delayed_compare,fast_compare,\ + shift,trap,var_shift_rotate,cntlz,exts,isel") + (eq_attr "cpu" "ppce500mc")) + "e500mc_decode,e500mc_issue+e500mc_su_stage0+e500mc_retire") + +(define_insn_reservation "e500mc_two" 1 + (and (eq_attr "type" "two") + (eq_attr "cpu" "ppce500mc")) + "e500mc_decode,e500mc_issue+e500mc_su_stage0+e500mc_retire,\ + e500mc_issue+e500mc_su_stage0+e500mc_retire") + +(define_insn_reservation "e500mc_three" 1 + (and (eq_attr "type" "three") + (eq_attr "cpu" "ppce500mc")) + "e500mc_decode,e500mc_issue+e500mc_su_stage0+e500mc_retire,\ + e500mc_issue+e500mc_su_stage0+e500mc_retire,\ + e500mc_issue+e500mc_su_stage0+e500mc_retire") + +;; Multiply. 
+(define_insn_reservation "e500mc_multiply" 4 + (and (eq_attr "type" "imul,imul2,imul3,imul_compare") + (eq_attr "cpu" "ppce500mc")) + "e500mc_decode,e500mc_issue+e500mc_mu_stage0,e500mc_mu_stage1,\ + e500mc_mu_stage2,e500mc_mu_stage3+e500mc_retire") + +;; Divide. We use the average latency time here. +(define_insn_reservation "e500mc_divide" 14 + (and (eq_attr "type" "idiv") + (eq_attr "cpu" "ppce500mc")) + "e500mc_decode,e500mc_issue+e500mc_mu_stage0+e500mc_mu_div,\ + e500mc_mu_div*13") + +;; Branch. +(define_insn_reservation "e500mc_branch" 1 + (and (eq_attr "type" "jmpreg,branch,isync") + (eq_attr "cpu" "ppce500mc")) + "e500mc_decode,e500mc_bu,e500mc_retire") + +;; CR logical. +(define_insn_reservation "e500mc_cr_logical" 1 + (and (eq_attr "type" "cr_logical,delayed_cr") + (eq_attr "cpu" "ppce500mc")) + "e500mc_decode,e500mc_bu,e500mc_retire") + +;; Mfcr. +(define_insn_reservation "e500mc_mfcr" 1 + (and (eq_attr "type" "mfcr") + (eq_attr "cpu" "ppce500mc")) + "e500mc_decode,e500mc_issue+e500mc_su1_stage0+e500mc_retire") + +;; Mtcrf. +(define_insn_reservation "e500mc_mtcrf" 1 + (and (eq_attr "type" "mtcr") + (eq_attr "cpu" "ppce500mc")) + "e500mc_decode,e500mc_issue+e500mc_su1_stage0+e500mc_retire") + +;; Mtjmpr. +(define_insn_reservation "e500mc_mtjmpr" 1 + (and (eq_attr "type" "mtjmpr,mfjmpr") + (eq_attr "cpu" "ppce500mc")) + "e500mc_decode,e500mc_issue+e500mc_su_stage0+e500mc_retire") + +;; Brinc. +(define_insn_reservation "e500mc_brinc" 1 + (and (eq_attr "type" "brinc") + (eq_attr "cpu" "ppce500mc")) + "e500mc_decode,e500mc_issue+e500mc_su_stage0+e500mc_retire") + +;; Loads. 
+(define_insn_reservation "e500mc_load" 3 + (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\ + load_l,sync") + (eq_attr "cpu" "ppce500mc")) + "e500mc_decode,e500mc_issue+e500mc_lsu,nothing,e500mc_retire") + +(define_insn_reservation "e500mc_fpload" 4 + (and (eq_attr "type" "fpload,fpload_ux,fpload_u") + (eq_attr "cpu" "ppce500mc")) + "e500mc_decode,e500mc_issue+e500mc_lsu,nothing*2,e500mc_retire") + +;; Stores. +(define_insn_reservation "e500mc_store" 3 + (and (eq_attr "type" "store,store_ux,store_u,store_c") + (eq_attr "cpu" "ppce500mc")) + "e500mc_decode,e500mc_issue+e500mc_lsu,nothing,e500mc_retire") + +(define_insn_reservation "e500mc_fpstore" 3 + (and (eq_attr "type" "fpstore,fpstore_ux,fpstore_u") + (eq_attr "cpu" "ppce500mc")) + "e500mc_decode,e500mc_issue+e500mc_lsu,nothing,e500mc_retire") + +;; The following ignores the retire unit to avoid a large automata. + +;; Simple FP. +(define_insn_reservation "e500mc_simple_float" 8 + (and (eq_attr "type" "fpsimple") + (eq_attr "cpu" "ppce500mc")) + "e500mc_decode,e500mc_issue+e500mc_fpu") +; "e500mc_decode,e500mc_issue+e500mc_fpu,nothing*6,e500mc_retire") + +;; FP. +(define_insn_reservation "e500mc_float" 8 + (and (eq_attr "type" "fp") + (eq_attr "cpu" "ppce500mc")) + "e500mc_decode,e500mc_issue+e500mc_fpu") +; "e500mc_decode,e500mc_issue+e500mc_fpu,nothing*6,e500mc_retire") + +(define_insn_reservation "e500mc_fpcompare" 8 + (and (eq_attr "type" "fpcompare") + (eq_attr "cpu" "ppce500mc")) + "e500mc_decode,e500mc_issue+e500mc_fpu") + +(define_insn_reservation "e500mc_dmul" 10 + (and (eq_attr "type" "dmul") + (eq_attr "cpu" "ppce500mc")) + "e500mc_decode,e500mc_issue+e500mc_fpu") + +;; FP divides are not pipelined. 
+(define_insn_reservation "e500mc_sdiv" 36 + (and (eq_attr "type" "sdiv") + (eq_attr "cpu" "ppce500mc")) + "e500mc_decode,e500mc_issue+e500mc_fpu,e500mc_fpu*35") + +(define_insn_reservation "e500mc_ddiv" 66 + (and (eq_attr "type" "ddiv") + (eq_attr "cpu" "ppce500mc")) + "e500mc_decode,e500mc_issue+e500mc_fpu,e500mc_fpu*65") diff --git a/gcc/config/rs6000/e500mc64.md b/gcc/config/rs6000/e500mc64.md new file mode 100644 index 000000000..8507514f5 --- /dev/null +++ b/gcc/config/rs6000/e500mc64.md @@ -0,0 +1,191 @@ +;; Pipeline description for Freescale PowerPC e500mc64 core. +;; Copyright (C) 2009 Free Software Foundation, Inc. +;; Contributed by Edmar Wienskoski (edmar@freescale.com) +;; +;; This file is part of GCC. +;; +;; GCC is free software; you can redistribute it and/or modify it +;; under the terms of the GNU General Public License as published +;; by the Free Software Foundation; either version 3, or (at your +;; option) any later version. +;; +;; GCC is distributed in the hope that it will be useful, but WITHOUT +;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +;; License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . +;; +;; e500mc64 64-bit SU(2), LSU, FPU, BPU +;; Max issue 3 insns/clock cycle (includes 1 branch) + +(define_automaton "e500mc64_most,e500mc64_long,e500mc64_retire") +(define_cpu_unit "e500mc64_decode_0,e500mc64_decode_1" "e500mc64_most") +(define_cpu_unit "e500mc64_issue_0,e500mc64_issue_1" "e500mc64_most") +(define_cpu_unit "e500mc64_retire_0,e500mc64_retire_1" "e500mc64_retire") + +;; SU. +(define_cpu_unit "e500mc64_su0_stage0,e500mc64_su1_stage0" "e500mc64_most") + +;; MU. 
+(define_cpu_unit "e500mc64_mu_stage0,e500mc64_mu_stage1" "e500mc64_most") +(define_cpu_unit "e500mc64_mu_stage2,e500mc64_mu_stage3" "e500mc64_most") + +;; Non-pipelined division. +(define_cpu_unit "e500mc64_mu_div" "e500mc64_long") + +;; LSU. +(define_cpu_unit "e500mc64_lsu" "e500mc64_most") + +;; FPU. +(define_cpu_unit "e500mc64_fpu" "e500mc64_most") + +;; Branch unit. +(define_cpu_unit "e500mc64_bu" "e500mc64_most") + +;; The following units are used to make the automata deterministic. +(define_cpu_unit "present_e500mc64_decode_0" "e500mc64_most") +(define_cpu_unit "present_e500mc64_issue_0" "e500mc64_most") +(define_cpu_unit "present_e500mc64_retire_0" "e500mc64_retire") +(define_cpu_unit "present_e500mc64_su0_stage0" "e500mc64_most") + +;; The following sets to make automata deterministic when option ndfa is used. +(presence_set "present_e500mc64_decode_0" "e500mc64_decode_0") +(presence_set "present_e500mc64_issue_0" "e500mc64_issue_0") +(presence_set "present_e500mc64_retire_0" "e500mc64_retire_0") +(presence_set "present_e500mc64_su0_stage0" "e500mc64_su0_stage0") + +;; Some useful abbreviations. +(define_reservation "e500mc64_decode" + "e500mc64_decode_0|e500mc64_decode_1+present_e500mc64_decode_0") +(define_reservation "e500mc64_issue" + "e500mc64_issue_0|e500mc64_issue_1+present_e500mc64_issue_0") +(define_reservation "e500mc64_retire" + "e500mc64_retire_0|e500mc64_retire_1+present_e500mc64_retire_0") +(define_reservation "e500mc64_su_stage0" + "e500mc64_su0_stage0|e500mc64_su1_stage0+present_e500mc64_su0_stage0") + +;; Simple SU insns. 
+(define_insn_reservation "e500mc64_su" 1 + (and (eq_attr "type" "integer,insert_word,insert_dword,delayed_compare,\ + shift,cntlz,exts") + (eq_attr "cpu" "ppce500mc64")) + "e500mc64_decode,e500mc64_issue+e500mc64_su_stage0+e500mc64_retire") + +(define_insn_reservation "e500mc64_su2" 2 + (and (eq_attr "type" "cmp,compare,delayed_compare,fast_compare,trap") + (eq_attr "cpu" "ppce500mc64")) + "e500mc64_decode,e500mc64_issue+e500mc64_su_stage0,e500mc64_retire") + +(define_insn_reservation "e500mc64_delayed" 2 + (and (eq_attr "type" "var_shift_rotate,var_delayed_compare") + (eq_attr "cpu" "ppce500mc64")) + "e500mc64_decode,e500mc64_issue+e500mc64_su_stage0,e500mc64_retire") + +(define_insn_reservation "e500mc64_two" 2 + (and (eq_attr "type" "two") + (eq_attr "cpu" "ppce500mc64")) + "e500mc64_decode,e500mc64_issue+e500mc64_su_stage0+e500mc64_retire,\ + e500mc64_issue+e500mc64_su_stage0+e500mc64_retire") + +(define_insn_reservation "e500mc64_three" 3 + (and (eq_attr "type" "three") + (eq_attr "cpu" "ppce500mc64")) + "e500mc64_decode,e500mc64_issue+e500mc64_su_stage0+e500mc64_retire,\ + e500mc64_issue+e500mc64_su_stage0+e500mc64_retire,\ + e500mc64_issue+e500mc64_su_stage0+e500mc64_retire") + +;; Multiply. +(define_insn_reservation "e500mc64_multiply" 4 + (and (eq_attr "type" "imul,imul2,imul3,imul_compare") + (eq_attr "cpu" "ppce500mc64")) + "e500mc64_decode,e500mc64_issue+e500mc64_mu_stage0,e500mc64_mu_stage1,\ + e500mc64_mu_stage2,e500mc64_mu_stage3+e500mc64_retire") + +;; Divide. We use the average latency time here. +(define_insn_reservation "e500mc64_divide" 14 + (and (eq_attr "type" "idiv") + (eq_attr "cpu" "ppce500mc64")) + "e500mc64_decode,e500mc64_issue+e500mc64_mu_stage0+e500mc64_mu_div,\ + e500mc64_mu_div*13") + +;; Branch. +(define_insn_reservation "e500mc64_branch" 1 + (and (eq_attr "type" "jmpreg,branch,isync") + (eq_attr "cpu" "ppce500mc64")) + "e500mc64_decode,e500mc64_bu,e500mc64_retire") + +;; CR logical. 
+(define_insn_reservation "e500mc64_cr_logical" 1 + (and (eq_attr "type" "cr_logical,delayed_cr") + (eq_attr "cpu" "ppce500mc64")) + "e500mc64_decode,e500mc64_bu,e500mc64_retire") + +;; Mfcr. +(define_insn_reservation "e500mc64_mfcr" 4 + (and (eq_attr "type" "mfcr") + (eq_attr "cpu" "ppce500mc64")) + "e500mc64_decode,e500mc64_issue+e500mc64_su1_stage0,e500mc64_su1_stage0*3+e500mc64_retire") + +;; Mtcrf. +(define_insn_reservation "e500mc64_mtcrf" 1 + (and (eq_attr "type" "mtcr") + (eq_attr "cpu" "ppce500mc64")) + "e500mc64_decode,e500mc64_issue+e500mc64_su1_stage0+e500mc64_retire") + +;; Mtjmpr. +(define_insn_reservation "e500mc64_mtjmpr" 1 + (and (eq_attr "type" "mtjmpr,mfjmpr") + (eq_attr "cpu" "ppce500mc64")) + "e500mc64_decode,e500mc64_issue+e500mc64_su_stage0+e500mc64_retire") + +;; Brinc. +(define_insn_reservation "e500mc64_brinc" 1 + (and (eq_attr "type" "brinc") + (eq_attr "cpu" "ppce500mc64")) + "e500mc64_decode,e500mc64_issue+e500mc64_su_stage0+e500mc64_retire") + +;; Loads. +(define_insn_reservation "e500mc64_load" 3 + (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\ + load_l,sync") + (eq_attr "cpu" "ppce500mc64")) + "e500mc64_decode,e500mc64_issue+e500mc64_lsu,nothing,e500mc64_retire") + +(define_insn_reservation "e500mc64_fpload" 4 + (and (eq_attr "type" "fpload,fpload_ux,fpload_u") + (eq_attr "cpu" "ppce500mc64")) + "e500mc64_decode,e500mc64_issue+e500mc64_lsu,nothing*2,e500mc64_retire") + +;; Stores. +(define_insn_reservation "e500mc64_store" 3 + (and (eq_attr "type" "store,store_ux,store_u,store_c") + (eq_attr "cpu" "ppce500mc64")) + "e500mc64_decode,e500mc64_issue+e500mc64_lsu,nothing,e500mc64_retire") + +(define_insn_reservation "e500mc64_fpstore" 3 + (and (eq_attr "type" "fpstore,fpstore_ux,fpstore_u") + (eq_attr "cpu" "ppce500mc64")) + "e500mc64_decode,e500mc64_issue+e500mc64_lsu,nothing,e500mc64_retire") + +;; The following ignores the retire unit to avoid a large automata. + +;; FP. 
+(define_insn_reservation "e500mc64_float" 7 + (and (eq_attr "type" "fpsimple,fp,fpcompare,dmul") + (eq_attr "cpu" "ppce500mc64")) + "e500mc64_decode,e500mc64_issue+e500mc64_fpu") +; "e500mc64_decode,e500mc64_issue+e500mc64_fpu,nothing*5,e500mc64_retire") + +;; FP divides are not pipelined. +(define_insn_reservation "e500mc64_sdiv" 20 + (and (eq_attr "type" "sdiv") + (eq_attr "cpu" "ppce500mc64")) + "e500mc64_decode,e500mc64_issue+e500mc64_fpu,e500mc64_fpu*19") + +(define_insn_reservation "e500mc64_ddiv" 35 + (and (eq_attr "type" "ddiv") + (eq_attr "cpu" "ppce500mc64")) + "e500mc64_decode,e500mc64_issue+e500mc64_fpu,e500mc64_fpu*34") diff --git a/gcc/config/rs6000/eabi-ci.asm b/gcc/config/rs6000/eabi-ci.asm new file mode 100644 index 000000000..696f33d39 --- /dev/null +++ b/gcc/config/rs6000/eabi-ci.asm @@ -0,0 +1,113 @@ +/* crti.s for eabi + Copyright (C) 1996, 2000, 2008, 2009 Free Software Foundation, Inc. + Written By Michael Meissner + +This file is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 3, or (at your option) any +later version. + +This file is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* This file just supplies labeled starting points for the .got* and other + special sections. It is linked in first before other modules. 
*/ + + .ident "GNU C crti.s" + +#include + +#ifndef __powerpc64__ + .section ".got","aw" + .globl __GOT_START__ + .type __GOT_START__,@object +__GOT_START__: + + .section ".got1","aw" + .globl __GOT1_START__ + .type __GOT1_START__,@object +__GOT1_START__: + + .section ".got2","aw" + .globl __GOT2_START__ + .type __GOT2_START__,@object +__GOT2_START__: + + .section ".fixup","aw" + .globl __FIXUP_START__ + .type __FIXUP_START__,@object +__FIXUP_START__: + + .section ".ctors","aw" + .globl __CTOR_LIST__ + .type __CTOR_LIST__,@object +__CTOR_LIST__: + + .section ".dtors","aw" + .globl __DTOR_LIST__ + .type __DTOR_LIST__,@object +__DTOR_LIST__: + + .section ".sdata","aw" + .globl __SDATA_START__ + .type __SDATA_START__,@object + .weak _SDA_BASE_ + .type _SDA_BASE_,@object +__SDATA_START__: +_SDA_BASE_: + + .section ".sbss","aw",@nobits + .globl __SBSS_START__ + .type __SBSS_START__,@object +__SBSS_START__: + + .section ".sdata2","a" + .weak _SDA2_BASE_ + .type _SDA2_BASE_,@object + .globl __SDATA2_START__ + .type __SDATA2_START__,@object +__SDATA2_START__: +_SDA2_BASE_: + + .section ".sbss2","a" + .globl __SBSS2_START__ + .type __SBSS2_START__,@object +__SBSS2_START__: + + .section ".gcc_except_table","aw" + .globl __EXCEPT_START__ + .type __EXCEPT_START__,@object +__EXCEPT_START__: + + .section ".eh_frame","aw" + .globl __EH_FRAME_BEGIN__ + .type __EH_FRAME_BEGIN__,@object +__EH_FRAME_BEGIN__: + +/* Head of __init function used for static constructors. */ + .section ".init","ax" + .align 2 +FUNC_START(__init) + stwu 1,-16(1) + mflr 0 + stw 0,20(1) + +/* Head of __fini function used for static destructors. 
*/ + .section ".fini","ax" + .align 2 +FUNC_START(__fini) + stwu 1,-16(1) + mflr 0 + stw 0,20(1) +#endif diff --git a/gcc/config/rs6000/eabi-cn.asm b/gcc/config/rs6000/eabi-cn.asm new file mode 100644 index 000000000..68774097c --- /dev/null +++ b/gcc/config/rs6000/eabi-cn.asm @@ -0,0 +1,104 @@ +/* crtn.s for eabi + Copyright (C) 1996, 2000, 2007, 2008, 2009 Free Software Foundation, Inc. + Written By Michael Meissner + +This file is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 3, or (at your option) any +later version. + +This file is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* This file just supplies labeled ending points for the .got* and other + special sections. It is linked in last after other modules. 
*/ + + .ident "GNU C crtn.s" + +#ifndef __powerpc64__ + .section ".got","aw" + .globl __GOT_END__ + .type __GOT_END__,@object +__GOT_END__: + + .section ".got1","aw" + .globl __GOT1_END__ + .type __GOT1_END__,@object +__GOT1_END__: + + .section ".got2","aw" + .globl __GOT2_END__ + .type __GOT2_END__,@object +__GOT2_END__: + + .section ".fixup","aw" + .globl __FIXUP_END__ + .type __FIXUP_END__,@object +__FIXUP_END__: + + .section ".ctors","aw" + .globl __CTOR_END__ + .type __CTOR_END__,@object +__CTOR_END__: + + .section ".dtors","aw" + .weak __DTOR_END__ + .type __DTOR_END__,@object +__DTOR_END__: + + .section ".sdata","aw" + .globl __SDATA_END__ + .type __SDATA_END__,@object +__SDATA_END__: + + .section ".sbss","aw",@nobits + .globl __SBSS_END__ + .type __SBSS_END__,@object +__SBSS_END__: + + .section ".sdata2","a" + .globl __SDATA2_END__ + .type __SDATA2_END__,@object +__SDATA2_END__: + + .section ".sbss2","a" + .globl __SBSS2_END__ + .type __SBSS2_END__,@object +__SBSS2_END__: + + .section ".gcc_except_table","aw" + .globl __EXCEPT_END__ + .type __EXCEPT_END__,@object +__EXCEPT_END__: + + .section ".eh_frame","aw" + .globl __EH_FRAME_END__ + .type __EH_FRAME_END__,@object +__EH_FRAME_END__: + .long 0 + +/* Tail of __init function used for static constructors. */ + .section ".init","ax" + lwz 0,20(1) + mtlr 0 + addi 1,1,16 + blr + +/* Tail of __fini function used for static destructors. */ + .section ".fini","ax" + lwz 0,20(1) + mtlr 0 + addi 1,1,16 + blr +#endif diff --git a/gcc/config/rs6000/eabi.asm b/gcc/config/rs6000/eabi.asm new file mode 100644 index 000000000..292d88e50 --- /dev/null +++ b/gcc/config/rs6000/eabi.asm @@ -0,0 +1,289 @@ +/* + * Special support for eabi and SVR4 + * + * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008, 2009 + * Free Software Foundation, Inc. 
+ * Written By Michael Meissner + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * . + */ + +/* Do any initializations needed for the eabi environment */ + + .section ".text" + #include "ppc-asm.h" + +#ifndef __powerpc64__ + + .section ".got2","aw" + .align 2 +.LCTOC1 = . 
/* +32768 */ + +/* Table of addresses */ +.Ltable = .-.LCTOC1 + .long .LCTOC1 /* address we are really at */ + +.Lsda = .-.LCTOC1 + .long _SDA_BASE_ /* address of the first small data area */ + +.Lsdas = .-.LCTOC1 + .long __SDATA_START__ /* start of .sdata/.sbss section */ + +.Lsdae = .-.LCTOC1 + .long __SBSS_END__ /* end of .sdata/.sbss section */ + +.Lsda2 = .-.LCTOC1 + .long _SDA2_BASE_ /* address of the second small data area */ + +.Lsda2s = .-.LCTOC1 + .long __SDATA2_START__ /* start of .sdata2/.sbss2 section */ + +.Lsda2e = .-.LCTOC1 + .long __SBSS2_END__ /* end of .sdata2/.sbss2 section */ + +#ifdef _RELOCATABLE +.Lgots = .-.LCTOC1 + .long __GOT_START__ /* Global offset table start */ + +.Lgotm1 = .-.LCTOC1 + .long _GLOBAL_OFFSET_TABLE_-4 /* end of GOT ptrs before BLCL + 3 reserved words */ + +.Lgotm2 = .-.LCTOC1 + .long _GLOBAL_OFFSET_TABLE_+12 /* start of GOT ptrs after BLCL + 3 reserved words */ + +.Lgote = .-.LCTOC1 + .long __GOT_END__ /* Global offset table end */ + +.Lgot2s = .-.LCTOC1 + .long __GOT2_START__ /* -mrelocatable GOT pointers start */ + +.Lgot2e = .-.LCTOC1 + .long __GOT2_END__ /* -mrelocatable GOT pointers end */ + +.Lfixups = .-.LCTOC1 + .long __FIXUP_START__ /* start of .fixup section */ + +.Lfixupe = .-.LCTOC1 + .long __FIXUP_END__ /* end of .fixup section */ + +.Lctors = .-.LCTOC1 + .long __CTOR_LIST__ /* start of .ctor section */ + +.Lctore = .-.LCTOC1 + .long __CTOR_END__ /* end of .ctor section */ + +.Ldtors = .-.LCTOC1 + .long __DTOR_LIST__ /* start of .dtor section */ + +.Ldtore = .-.LCTOC1 + .long __DTOR_END__ /* end of .dtor section */ + +.Lexcepts = .-.LCTOC1 + .long __EXCEPT_START__ /* start of .gcc_except_table section */ + +.Lexcepte = .-.LCTOC1 + .long __EXCEPT_END__ /* end of .gcc_except_table section */ + +.Linit = .-.LCTOC1 + .long .Linit_p /* address of variable to say we've been called */ + + .text + .align 2 +.Lptr: + .long .LCTOC1-.Laddr /* PC relative pointer to .got2 */ +#endif + + .data + .align 2 +.Linit_p: + 
.long 0 + + .text + +FUNC_START(__eabi) + +/* Eliminate -mrelocatable code if not -mrelocatable, so that this file can + be assembled with other assemblers than GAS. */ + +#ifndef _RELOCATABLE + addis 10,0,.Linit_p@ha /* init flag */ + addis 11,0,.LCTOC1@ha /* load address of .LCTOC1 */ + lwz 9,.Linit_p@l(10) /* init flag */ + addi 11,11,.LCTOC1@l + cmplwi 2,9,0 /* init flag != 0? */ + bnelr 2 /* return now, if we've been called already */ + stw 1,.Linit_p@l(10) /* store a nonzero value in the done flag */ + +#else /* -mrelocatable */ + mflr 0 + bl .Laddr /* get current address */ +.Laddr: + mflr 12 /* real address of .Laddr */ + lwz 11,(.Lptr-.Laddr)(12) /* linker generated address of .LCTOC1 */ + add 11,11,12 /* correct to real pointer */ + lwz 12,.Ltable(11) /* get linker's idea of where .Laddr is */ + lwz 10,.Linit(11) /* address of init flag */ + subf. 12,12,11 /* calculate difference */ + lwzx 9,10,12 /* done flag */ + cmplwi 2,9,0 /* init flag != 0? */ + mtlr 0 /* restore in case branch was taken */ + bnelr 2 /* return now, if we've been called already */ + stwx 1,10,12 /* store a nonzero value in the done flag */ + beq+ 0,.Lsdata /* skip if we don't need to relocate */ + +/* We need to relocate the .got2 pointers. 
*/ + + lwz 3,.Lgot2s(11) /* GOT2 pointers start */ + lwz 4,.Lgot2e(11) /* GOT2 pointers end */ + add 3,12,3 /* adjust pointers */ + add 4,12,4 + bl FUNC_NAME(__eabi_convert) /* convert pointers in .got2 section */ + +/* Fixup the .ctor section for static constructors */ + + lwz 3,.Lctors(11) /* constructors pointers start */ + lwz 4,.Lctore(11) /* constructors pointers end */ + bl FUNC_NAME(__eabi_convert) /* convert constructors */ + +/* Fixup the .dtor section for static destructors */ + + lwz 3,.Ldtors(11) /* destructors pointers start */ + lwz 4,.Ldtore(11) /* destructors pointers end */ + bl FUNC_NAME(__eabi_convert) /* convert destructors */ + +/* Fixup the .gcc_except_table section for G++ exceptions */ + + lwz 3,.Lexcepts(11) /* exception table pointers start */ + lwz 4,.Lexcepte(11) /* exception table pointers end */ + bl FUNC_NAME(__eabi_convert) /* convert exceptions */ + +/* Fixup the addresses in the GOT below _GLOBAL_OFFSET_TABLE_-4 */ + + lwz 3,.Lgots(11) /* GOT table pointers start */ + lwz 4,.Lgotm1(11) /* GOT table pointers below _GLOBAL_OFFSET_TABLE-4 */ + bl FUNC_NAME(__eabi_convert) /* convert lower GOT */ + +/* Fixup the addresses in the GOT above _GLOBAL_OFFSET_TABLE_+12 */ + + lwz 3,.Lgotm2(11) /* GOT table pointers above _GLOBAL_OFFSET_TABLE+12 */ + lwz 4,.Lgote(11) /* GOT table pointers end */ + bl FUNC_NAME(__eabi_convert) /* convert lower GOT */ + +/* Fixup any user initialized pointers now (the compiler drops pointers to */ +/* each of the relocs that it does in the .fixup section). 
*/ + +.Lfix: + lwz 3,.Lfixups(11) /* fixup pointers start */ + lwz 4,.Lfixupe(11) /* fixup pointers end */ + bl FUNC_NAME(__eabi_uconvert) /* convert user initialized pointers */ + +.Lsdata: + mtlr 0 /* restore link register */ +#endif /* _RELOCATABLE */ + +/* Only load up register 13 if there is a .sdata and/or .sbss section */ + lwz 3,.Lsdas(11) /* start of .sdata/.sbss section */ + lwz 4,.Lsdae(11) /* end of .sdata/.sbss section */ + cmpw 1,3,4 /* .sdata/.sbss section non-empty? */ + beq- 1,.Lsda2l /* skip loading r13 */ + + lwz 13,.Lsda(11) /* load r13 with _SDA_BASE_ address */ + +/* Only load up register 2 if there is a .sdata2 and/or .sbss2 section */ + +.Lsda2l: + lwz 3,.Lsda2s(11) /* start of .sdata/.sbss section */ + lwz 4,.Lsda2e(11) /* end of .sdata/.sbss section */ + cmpw 1,3,4 /* .sdata/.sbss section non-empty? */ + beq+ 1,.Ldone /* skip loading r2 */ + + lwz 2,.Lsda2(11) /* load r2 with _SDA2_BASE_ address */ + +/* Done adjusting pointers, return by way of doing the C++ global constructors. */ + +.Ldone: + b FUNC_NAME(__init) /* do any C++ global constructors (which returns to caller) */ +FUNC_END(__eabi) + +/* Special subroutine to convert a bunch of pointers directly. + r0 has original link register + r3 has low pointer to convert + r4 has high pointer to convert + r5 .. r10 are scratch registers + r11 has the address of .LCTOC1 in it. + r12 has the value to add to each pointer + r13 .. r31 are unchanged */ +#ifdef _RELOCATABLE +FUNC_START(__eabi_convert) + cmplw 1,3,4 /* any pointers to convert? 
*/ + subf 5,3,4 /* calculate number of words to convert */ + bclr 4,4 /* return if no pointers */ + + srawi 5,5,2 + addi 3,3,-4 /* start-4 for use with lwzu */ + mtctr 5 + +.Lcvt: + lwzu 6,4(3) /* pointer to convert */ + cmpwi 0,6,0 + beq- .Lcvt2 /* if pointer is null, don't convert */ + + add 6,6,12 /* convert pointer */ + stw 6,0(3) +.Lcvt2: + bdnz+ .Lcvt + blr + +FUNC_END(__eabi_convert) + +/* Special subroutine to convert the pointers the user has initialized. The + compiler has placed the address of the initialized pointer into the .fixup + section. + + r0 has original link register + r3 has low pointer to convert + r4 has high pointer to convert + r5 .. r10 are scratch registers + r11 has the address of .LCTOC1 in it. + r12 has the value to add to each pointer + r13 .. r31 are unchanged */ + +FUNC_START(__eabi_uconvert) + cmplw 1,3,4 /* any pointers to convert? */ + subf 5,3,4 /* calculate number of words to convert */ + bclr 4,4 /* return if no pointers */ + + srawi 5,5,2 + addi 3,3,-4 /* start-4 for use with lwzu */ + mtctr 5 + +.Lucvt: + lwzu 6,4(3) /* next pointer to pointer to convert */ + add 6,6,12 /* adjust pointer */ + lwz 7,0(6) /* get the pointer it points to */ + stw 6,0(3) /* store adjusted pointer */ + add 7,7,12 /* adjust */ + stw 7,0(6) + bdnz+ .Lucvt + blr + +FUNC_END(__eabi_uconvert) +#endif +#endif diff --git a/gcc/config/rs6000/eabi.h b/gcc/config/rs6000/eabi.h new file mode 100644 index 000000000..3024a7586 --- /dev/null +++ b/gcc/config/rs6000/eabi.h @@ -0,0 +1,44 @@ +/* Core target definitions for GNU compiler + for IBM RS/6000 PowerPC targeted to embedded ELF systems. + Copyright (C) 1995, 1996, 2000, 2003, 2004, 2007 Free Software Foundation, Inc. + Contributed by Cygnus Support. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. 
+ + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +/* Add -meabi to target flags. */ +#undef TARGET_DEFAULT +#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS | MASK_EABI) + +/* Invoke an initializer function to set up the GOT. */ +#define NAME__MAIN "__eabi" +#define INVOKE__main + +#undef TARGET_VERSION +#define TARGET_VERSION fprintf (stderr, " (PowerPC Embedded)"); + +#undef TARGET_OS_CPP_BUILTINS +#define TARGET_OS_CPP_BUILTINS() \ + do \ + { \ + builtin_define_std ("PPC"); \ + builtin_define ("__embedded__"); \ + builtin_assert ("system=embedded"); \ + builtin_assert ("cpu=powerpc"); \ + builtin_assert ("machine=powerpc"); \ + TARGET_OS_SYSV_CPP_BUILTINS (); \ + } \ + while (0) diff --git a/gcc/config/rs6000/eabialtivec.h b/gcc/config/rs6000/eabialtivec.h new file mode 100644 index 000000000..417be97a4 --- /dev/null +++ b/gcc/config/rs6000/eabialtivec.h @@ -0,0 +1,30 @@ +/* Core target definitions for GNU compiler + for PowerPC targeted systems with AltiVec support. + Copyright (C) 2001, 2003, 2007 Free Software Foundation, Inc. + Contributed by Aldy Hernandez (aldyh@redhat.com). + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. 
+ + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +/* Add -meabi and -maltivec to target flags. */ +#undef TARGET_DEFAULT +#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS | MASK_EABI | MASK_ALTIVEC) + +#undef TARGET_VERSION +#define TARGET_VERSION fprintf (stderr, " (PowerPC Embedded with AltiVec)"); + +#undef SUBSUBTARGET_OVERRIDE_OPTIONS +#define SUBSUBTARGET_OVERRIDE_OPTIONS rs6000_altivec_abi = 1 diff --git a/gcc/config/rs6000/eabisim.h b/gcc/config/rs6000/eabisim.h new file mode 100644 index 000000000..65bc14dff --- /dev/null +++ b/gcc/config/rs6000/eabisim.h @@ -0,0 +1,54 @@ +/* Support for GCC on simulated PowerPC systems targeted to embedded ELF + systems. + Copyright (C) 1995, 1996, 2000, 2003, 2007 Free Software Foundation, Inc. + Contributed by Cygnus Support. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . 
*/ + +#undef TARGET_VERSION +#define TARGET_VERSION fprintf (stderr, " (PowerPC Simulated)"); + +#undef TARGET_OS_CPP_BUILTINS +#define TARGET_OS_CPP_BUILTINS() \ + do \ + { \ + builtin_define_std ("PPC"); \ + builtin_define ("__embedded__"); \ + builtin_define ("__simulator__"); \ + builtin_assert ("system=embedded"); \ + builtin_assert ("system=simulator"); \ + builtin_assert ("cpu=powerpc"); \ + builtin_assert ("machine=powerpc"); \ + TARGET_OS_SYSV_CPP_BUILTINS (); \ + } \ + while (0) + +/* Make the simulator the default */ +#undef LIB_DEFAULT_SPEC +#define LIB_DEFAULT_SPEC "%(lib_sim)" + +#undef STARTFILE_DEFAULT_SPEC +#define STARTFILE_DEFAULT_SPEC "%(startfile_sim)" + +#undef ENDFILE_DEFAULT_SPEC +#define ENDFILE_DEFAULT_SPEC "%(endfile_sim)" + +#undef LINK_START_DEFAULT_SPEC +#define LINK_START_DEFAULT_SPEC "%(link_start_sim)" + +#undef LINK_OS_DEFAULT_SPEC +#define LINK_OS_DEFAULT_SPEC "%(link_os_sim)" diff --git a/gcc/config/rs6000/eabispe.h b/gcc/config/rs6000/eabispe.h new file mode 100644 index 000000000..d3fc8a6be --- /dev/null +++ b/gcc/config/rs6000/eabispe.h @@ -0,0 +1,54 @@ +/* Core target definitions for GNU compiler + for PowerPC embedded targeted systems with SPE support. + Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 + Free Software Foundation, Inc. + Contributed by Aldy Hernandez (aldyh@redhat.com). + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . 
*/ + +#undef TARGET_DEFAULT +#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS | MASK_EABI \ + | MASK_STRICT_ALIGN) + +#undef TARGET_VERSION +#define TARGET_VERSION fprintf (stderr, " (PowerPC Embedded SPE)"); + +#undef SUBSUBTARGET_OVERRIDE_OPTIONS +#define SUBSUBTARGET_OVERRIDE_OPTIONS \ + if (rs6000_select[1].string == NULL) \ + rs6000_cpu = PROCESSOR_PPC8540; \ + if (!rs6000_explicit_options.spe_abi) \ + rs6000_spe_abi = 1; \ + if (!rs6000_explicit_options.float_gprs) \ + rs6000_float_gprs = 1; \ + if (!rs6000_explicit_options.spe) \ + rs6000_spe = 1; \ + if (target_flags & MASK_64BIT) \ + error ("-m64 not supported in this configuration") + +/* The e500 ABI says that either long doubles are 128 bits, or if + implemented in any other size, the compiler/linker should error out. + We have no emulation libraries for 128 bit long doubles, and I hate + the dozens of failures on the regression suite. So I'm breaking ABI + specifications, until I properly fix the emulation. + + Enable these later. +#define RS6000_DEFAULT_LONG_DOUBLE_SIZE (TARGET_SPE ? 128 : 64) +*/ + +#undef ASM_DEFAULT_SPEC +#define ASM_DEFAULT_SPEC "-mppc -mspe -me500" diff --git a/gcc/config/rs6000/freebsd.h b/gcc/config/rs6000/freebsd.h new file mode 100644 index 000000000..567263b2a --- /dev/null +++ b/gcc/config/rs6000/freebsd.h @@ -0,0 +1,80 @@ +/* Definitions for PowerPC running FreeBSD using the ELF format + Copyright (C) 2001, 2003, 2007, 2009, 2010, 2011 + Free Software Foundation, Inc. + Contributed by David E. O'Brien and BSDi. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +/* Override the defaults, which exist to force the proper definition. */ + +#undef CPP_OS_DEFAULT_SPEC +#define CPP_OS_DEFAULT_SPEC "%(cpp_os_freebsd)" + +#undef STARTFILE_DEFAULT_SPEC +#define STARTFILE_DEFAULT_SPEC "%(startfile_freebsd)" + +#undef ENDFILE_DEFAULT_SPEC +#define ENDFILE_DEFAULT_SPEC "%(endfile_freebsd)" + +#undef LIB_DEFAULT_SPEC +#define LIB_DEFAULT_SPEC "%(lib_freebsd)" + +#undef LINK_START_DEFAULT_SPEC +#define LINK_START_DEFAULT_SPEC "%(link_start_freebsd)" + +#undef LINK_OS_DEFAULT_SPEC +#define LINK_OS_DEFAULT_SPEC "%(link_os_freebsd)" + +/* XXX: This is wrong for many platforms in sysv4.h. + We should work on getting that definition fixed. */ +#undef LINK_SHLIB_SPEC +#define LINK_SHLIB_SPEC "%{shared:-shared} %{!shared: %{static:-static}}" + + +/************************[ Target stuff ]***********************************/ + +/* Define the actual types of some ANSI-mandated types. + Needs to agree with . GCC defaults come from c-decl.c, + c-common.c, and config//.h. */ + +#undef SIZE_TYPE +#define SIZE_TYPE "unsigned int" + +/* rs6000.h gets this wrong for FreeBSD. We use the GCC defaults instead. */ +#undef WCHAR_TYPE + +#undef WCHAR_TYPE_SIZE +#define WCHAR_TYPE_SIZE 32 + +#undef TARGET_VERSION +#define TARGET_VERSION fprintf (stderr, " (FreeBSD/PowerPC ELF)"); + +/* Override rs6000.h definition. */ +#undef ASM_APP_ON +#define ASM_APP_ON "#APP\n" + +/* Override rs6000.h definition. */ +#undef ASM_APP_OFF +#define ASM_APP_OFF "#NO_APP\n" + +/* We don't need to generate entries in .fixup, except when + -mrelocatable or -mrelocatable-lib is given. 
*/ +#undef RELOCATABLE_NEEDS_FIXUP +#define RELOCATABLE_NEEDS_FIXUP \ + (target_flags & target_flags_explicit & MASK_RELOCATABLE) + +#define DBX_REGISTER_NUMBER(REGNO) rs6000_dbx_register_number (REGNO) diff --git a/gcc/config/rs6000/gnu.h b/gcc/config/rs6000/gnu.h new file mode 100644 index 000000000..0f329e53f --- /dev/null +++ b/gcc/config/rs6000/gnu.h @@ -0,0 +1,37 @@ +/* Definitions of target machine for GNU compiler, + for PowerPC machines running GNU. + Copyright (C) 2001, 2003, 2007 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +#undef CPP_OS_DEFAULT_SPEC +#define CPP_OS_DEFAULT_SPEC "%(cpp_os_gnu)" + +#undef STARTFILE_DEFAULT_SPEC +#define STARTFILE_DEFAULT_SPEC "%(startfile_gnu)" + +#undef ENDFILE_DEFAULT_SPEC +#define ENDFILE_DEFAULT_SPEC "%(endfile_gnu)" + +#undef LINK_START_DEFAULT_SPEC +#define LINK_START_DEFAULT_SPEC "%(link_start_gnu)" + +#undef LINK_OS_DEFAULT_SPEC +#define LINK_OS_DEFAULT_SPEC "%(link_os_gnu)" + +#undef TARGET_VERSION +#define TARGET_VERSION fprintf (stderr, " (PowerPC GNU)"); diff --git a/gcc/config/rs6000/host-darwin.c b/gcc/config/rs6000/host-darwin.c new file mode 100644 index 000000000..48afa46e1 --- /dev/null +++ b/gcc/config/rs6000/host-darwin.c @@ -0,0 +1,154 @@ +/* Darwin/powerpc host-specific hook definitions. + Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2010 + Free Software Foundation, Inc. 
+ + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include +#include "hosthooks.h" +#include "hosthooks-def.h" +#include "diagnostic.h" +#include "config/host-darwin.h" + +static void segv_crash_handler (int); +static void segv_handler (int, siginfo_t *, void *); +static void darwin_rs6000_extra_signals (void); + +#ifndef HAVE_DECL_SIGALTSTACK +/* This doesn't have a prototype in signal.h in 10.2.x and earlier, + fixed in later releases. */ +extern int sigaltstack(const struct sigaltstack *, struct sigaltstack *); +#endif + +/* The fields of the mcontext_t type have acquired underscores in later + OS versions. */ +#ifdef HAS_MCONTEXT_T_UNDERSCORES +#define MC_FLD(x) __ ## x +#else +#define MC_FLD(x) x +#endif + +#undef HOST_HOOKS_EXTRA_SIGNALS +#define HOST_HOOKS_EXTRA_SIGNALS darwin_rs6000_extra_signals + +/* On Darwin/powerpc, hitting the stack limit turns into a SIGSEGV. + This code detects the difference between hitting the stack limit and + a true wild pointer dereference by looking at the instruction that + faulted; only a few kinds of instruction are used to access below + the previous bottom of the stack. 
*/ + +static void +segv_crash_handler (int sig ATTRIBUTE_UNUSED) +{ + internal_error ("Segmentation Fault (code)"); +} + +static void +segv_handler (int sig ATTRIBUTE_UNUSED, + siginfo_t *sip ATTRIBUTE_UNUSED, + void *scp) +{ + ucontext_t *uc = (ucontext_t *)scp; + sigset_t sigset; + unsigned faulting_insn; + + /* The fault might have happened when trying to run some instruction, in + which case the next line will segfault _again_. Handle this case. */ + signal (SIGSEGV, segv_crash_handler); + sigemptyset (&sigset); + sigaddset (&sigset, SIGSEGV); + sigprocmask (SIG_UNBLOCK, &sigset, NULL); + + faulting_insn = *(unsigned *)uc->uc_mcontext->MC_FLD(ss).MC_FLD(srr0); + + /* Note that this only has to work for GCC, so we don't have to deal + with all the possible cases (GCC has no AltiVec code, for + instance). It's complicated because Darwin allows stores to + below the stack pointer, and the prologue code takes advantage of + this. */ + + if ((faulting_insn & 0xFFFF8000) == 0x94218000 /* stwu %r1, -xxx(%r1) */ + || (faulting_insn & 0xFC1F03FF) == 0x7C01016E /* stwux xxx, %r1, xxx */ + || (faulting_insn & 0xFC1F8000) == 0x90018000 /* stw xxx, -yyy(%r1) */ + || (faulting_insn & 0xFC1F8000) == 0xD8018000 /* stfd xxx, -yyy(%r1) */ + || (faulting_insn & 0xFC1F8000) == 0xBC018000 /* stmw xxx, -yyy(%r1) */) + { + char *shell_name; + + fnotice (stderr, "Out of stack space.\n"); + shell_name = getenv ("SHELL"); + if (shell_name != NULL) + shell_name = strrchr (shell_name, '/'); + if (shell_name != NULL) + { + static const char * shell_commands[][2] = { + { "sh", "ulimit -S -s unlimited" }, + { "bash", "ulimit -S -s unlimited" }, + { "tcsh", "limit stacksize unlimited" }, + { "csh", "limit stacksize unlimited" }, + /* zsh doesn't have "unlimited", this will work under the + default configuration. 
*/ + { "zsh", "limit stacksize 32m" } + }; + size_t i; + + for (i = 0; i < ARRAY_SIZE (shell_commands); i++) + if (strcmp (shell_commands[i][0], shell_name + 1) == 0) + { + fnotice (stderr, + "Try running '%s' in the shell to raise its limit.\n", + shell_commands[i][1]); + } + } + + if (global_dc->abort_on_error) + fancy_abort (__FILE__, __LINE__, __FUNCTION__); + + exit (FATAL_EXIT_CODE); + } + + fprintf (stderr, "[address=%08lx pc=%08x]\n", + uc->uc_mcontext->MC_FLD(es).MC_FLD(dar), + uc->uc_mcontext->MC_FLD(ss).MC_FLD(srr0)); + internal_error ("Segmentation Fault"); + exit (FATAL_EXIT_CODE); +} + +static void +darwin_rs6000_extra_signals (void) +{ + struct sigaction sact; + stack_t sigstk; + + sigstk.ss_sp = (char*)xmalloc (SIGSTKSZ); + sigstk.ss_size = SIGSTKSZ; + sigstk.ss_flags = 0; + if (sigaltstack (&sigstk, NULL) < 0) + fatal_error ("While setting up signal stack: %m"); + + sigemptyset(&sact.sa_mask); + sact.sa_flags = SA_ONSTACK | SA_SIGINFO; + sact.sa_sigaction = segv_handler; + if (sigaction (SIGSEGV, &sact, 0) < 0) + fatal_error ("While setting up signal handler: %m"); +} + + +const struct host_hooks host_hooks = HOST_HOOKS_INITIALIZER; diff --git a/gcc/config/rs6000/host-ppc64-darwin.c b/gcc/config/rs6000/host-ppc64-darwin.c new file mode 100644 index 000000000..49a920475 --- /dev/null +++ b/gcc/config/rs6000/host-ppc64-darwin.c @@ -0,0 +1,30 @@ +/* ppc64-darwin host-specific hook definitions. + Copyright (C) 2006, 2007 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. 
+ +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include "hosthooks.h" +#include "hosthooks-def.h" +#include "config/host-darwin.h" + +/* Darwin doesn't do anything special for ppc64 hosts; this file exists just + to include config/host-darwin.h. */ + +const struct host_hooks host_hooks = HOST_HOOKS_INITIALIZER; diff --git a/gcc/config/rs6000/libgcc-ppc-glibc.ver b/gcc/config/rs6000/libgcc-ppc-glibc.ver new file mode 100644 index 000000000..8862c14cb --- /dev/null +++ b/gcc/config/rs6000/libgcc-ppc-glibc.ver @@ -0,0 +1,73 @@ +# Copyright (C) 2006, 2007 Free Software Foundation, Inc. +# +# This file is part of GCC. +# +# GCC is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. +# +# GCC is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GCC; see the file COPYING3. If not see +# . 
+ +%ifndef _SOFT_FLOAT +%ifndef __powerpc64__ +%exclude { + __multc3 + __divtc3 + __powitf2 + __fixtfdi + __fixunstfdi + __floatditf +} + +GCC_4.1.0 { + # long double support + __multc3 + __divtc3 + __powitf2 + __fixtfdi + __fixunstfdi + __floatditf + +%else +GCC_3.4.4 { +%endif +%else +GCC_4.2.0 { +%endif + + # long double support + __gcc_qadd + __gcc_qsub + __gcc_qmul + __gcc_qdiv + +%ifdef _SOFT_DOUBLE + __gcc_qneg + __gcc_qeq + __gcc_qne + __gcc_qgt + __gcc_qge + __gcc_qlt + __gcc_qle + __gcc_stoq + __gcc_dtoq + __gcc_qtos + __gcc_qtod + __gcc_qtoi + __gcc_qtou + __gcc_itoq + __gcc_utoq +%endif + +%ifdef __NO_FPRS__ + __gcc_qunord +%endif +} diff --git a/gcc/config/rs6000/libgcc-ppc64.ver b/gcc/config/rs6000/libgcc-ppc64.ver new file mode 100644 index 000000000..b27b4b492 --- /dev/null +++ b/gcc/config/rs6000/libgcc-ppc64.ver @@ -0,0 +1,7 @@ +GCC_3.4.4 { + # long double support + __gcc_qadd + __gcc_qsub + __gcc_qmul + __gcc_qdiv +} diff --git a/gcc/config/rs6000/linux-unwind.h b/gcc/config/rs6000/linux-unwind.h new file mode 100644 index 000000000..a16df97e9 --- /dev/null +++ b/gcc/config/rs6000/linux-unwind.h @@ -0,0 +1,355 @@ +/* DWARF2 EH unwinding support for PowerPC and PowerPC64 Linux. + Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. 
+ + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#define R_LR 65 +#define R_CR2 70 +#define R_VR0 77 +#define R_VRSAVE 109 +#define R_VSCR 110 + +struct gcc_vregs +{ + __attribute__ ((vector_size (16))) int vr[32]; +#ifdef __powerpc64__ + unsigned int pad1[3]; + unsigned int vscr; + unsigned int vsave; + unsigned int pad2[3]; +#else + unsigned int vsave; + unsigned int pad[2]; + unsigned int vscr; +#endif +}; + +struct gcc_regs +{ + unsigned long gpr[32]; + unsigned long nip; + unsigned long msr; + unsigned long orig_gpr3; + unsigned long ctr; + unsigned long link; + unsigned long xer; + unsigned long ccr; + unsigned long softe; + unsigned long trap; + unsigned long dar; + unsigned long dsisr; + unsigned long result; + unsigned long pad1[4]; + double fpr[32]; + unsigned int pad2; + unsigned int fpscr; +#ifdef __powerpc64__ + struct gcc_vregs *vp; +#else + unsigned int pad3[2]; +#endif + struct gcc_vregs vregs; +}; + +struct gcc_ucontext +{ +#ifdef __powerpc64__ + unsigned long pad[28]; +#else + unsigned long pad[12]; +#endif + struct gcc_regs *regs; + struct gcc_regs rsave; +}; + +#ifdef __powerpc64__ + +enum { SIGNAL_FRAMESIZE = 128 }; + +/* If PC is at a sigreturn trampoline, return a pointer to the + regs. Otherwise return NULL. 
*/ + +static struct gcc_regs * +get_regs (struct _Unwind_Context *context) +{ + const unsigned int *pc = context->ra; + + /* addi r1, r1, 128; li r0, 0x0077; sc (sigreturn) */ + /* addi r1, r1, 128; li r0, 0x00AC; sc (rt_sigreturn) */ + if (pc[0] != 0x38210000 + SIGNAL_FRAMESIZE || pc[2] != 0x44000002) + return NULL; + if (pc[1] == 0x38000077) + { + struct sigframe { + char gap[SIGNAL_FRAMESIZE]; + unsigned long pad[7]; + struct gcc_regs *regs; + } *frame = (struct sigframe *) context->cfa; + return frame->regs; + } + else if (pc[1] == 0x380000AC) + { + /* This works for 2.4 kernels, but not for 2.6 kernels with vdso + because pc isn't pointing into the stack. Can be removed when + no one is running 2.4.19 or 2.4.20, the first two ppc64 + kernels released. */ + const struct rt_sigframe_24 { + int tramp[6]; + void *pinfo; + struct gcc_ucontext *puc; + } *frame24 = (const struct rt_sigframe_24 *) context->ra; + + /* Test for magic value in *puc of vdso. */ + if ((long) frame24->puc != -21 * 8) + return frame24->puc->regs; + else + { + /* This works for 2.4.21 and later kernels. 
*/ + struct rt_sigframe { + char gap[SIGNAL_FRAMESIZE]; + struct gcc_ucontext uc; + unsigned long pad[2]; + int tramp[6]; + void *pinfo; + struct gcc_ucontext *puc; + } *frame = (struct rt_sigframe *) context->cfa; + return frame->uc.regs; + } + } + return NULL; +} + +#else /* !__powerpc64__ */ + +enum { SIGNAL_FRAMESIZE = 64 }; + +static struct gcc_regs * +get_regs (struct _Unwind_Context *context) +{ + const unsigned int *pc = context->ra; + + /* li r0, 0x7777; sc (sigreturn old) */ + /* li r0, 0x0077; sc (sigreturn new) */ + /* li r0, 0x6666; sc (rt_sigreturn old) */ + /* li r0, 0x00AC; sc (rt_sigreturn new) */ + if (pc[1] != 0x44000002) + return NULL; + if (pc[0] == 0x38007777 || pc[0] == 0x38000077) + { + struct sigframe { + char gap[SIGNAL_FRAMESIZE]; + unsigned long pad[7]; + struct gcc_regs *regs; + } *frame = (struct sigframe *) context->cfa; + return frame->regs; + } + else if (pc[0] == 0x38006666 || pc[0] == 0x380000AC) + { + struct rt_sigframe { + char gap[SIGNAL_FRAMESIZE + 16]; + char siginfo[128]; + struct gcc_ucontext uc; + } *frame = (struct rt_sigframe *) context->cfa; + return frame->uc.regs; + } + return NULL; +} +#endif + +/* Find an entry in the process auxiliary vector. The canonical way to + test for VMX is to look at AT_HWCAP. */ + +static long +ppc_linux_aux_vector (long which) +{ + /* __libc_stack_end holds the original stack passed to a process. */ + extern long *__libc_stack_end; + long argc; + char **argv; + char **envp; + struct auxv + { + long a_type; + long a_val; + } *auxp; + + /* The Linux kernel puts argc first on the stack. */ + argc = __libc_stack_end[0]; + /* Followed by argv, NULL terminated. */ + argv = (char **) __libc_stack_end + 1; + /* Followed by environment string pointers, NULL terminated. */ + envp = argv + argc + 1; + while (*envp++) + continue; + /* Followed by the aux vector, zero terminated. 
*/ + for (auxp = (struct auxv *) envp; auxp->a_type != 0; ++auxp) + if (auxp->a_type == which) + return auxp->a_val; + return 0; +} + +/* Do code reading to identify a signal frame, and set the frame + state data appropriately. See unwind-dw2.c for the structs. */ + +#define MD_FALLBACK_FRAME_STATE_FOR ppc_fallback_frame_state + +static _Unwind_Reason_Code +ppc_fallback_frame_state (struct _Unwind_Context *context, + _Unwind_FrameState *fs) +{ + static long hwcap = 0; + struct gcc_regs *regs = get_regs (context); + long new_cfa; + int i; + + if (regs == NULL) + return _URC_END_OF_STACK; + + new_cfa = regs->gpr[STACK_POINTER_REGNUM]; + fs->regs.cfa_how = CFA_REG_OFFSET; + fs->regs.cfa_reg = STACK_POINTER_REGNUM; + fs->regs.cfa_offset = new_cfa - (long) context->cfa; + + for (i = 0; i < 32; i++) + if (i != STACK_POINTER_REGNUM) + { + fs->regs.reg[i].how = REG_SAVED_OFFSET; + fs->regs.reg[i].loc.offset = (long) ®s->gpr[i] - new_cfa; + } + + fs->regs.reg[R_CR2].how = REG_SAVED_OFFSET; + /* CR? regs are always 32-bit and PPC is big-endian, so in 64-bit + libgcc loc.offset needs to point to the low 32 bits of regs->ccr. */ + fs->regs.reg[R_CR2].loc.offset = (long) ®s->ccr - new_cfa + + sizeof (long) - 4; + + fs->regs.reg[R_LR].how = REG_SAVED_OFFSET; + fs->regs.reg[R_LR].loc.offset = (long) ®s->link - new_cfa; + + fs->regs.reg[ARG_POINTER_REGNUM].how = REG_SAVED_OFFSET; + fs->regs.reg[ARG_POINTER_REGNUM].loc.offset = (long) ®s->nip - new_cfa; + fs->retaddr_column = ARG_POINTER_REGNUM; + fs->signal_frame = 1; + + if (hwcap == 0) + { + hwcap = ppc_linux_aux_vector (16); + /* These will already be set if we found AT_HWCAP. A nonzero + value stops us looking again if for some reason we couldn't + find AT_HWCAP. */ +#ifdef __powerpc64__ + hwcap |= 0xc0000000; +#else + hwcap |= 0x80000000; +#endif + } + + /* If we have a FPU... 
*/ + if (hwcap & 0x08000000) + for (i = 0; i < 32; i++) + { + fs->regs.reg[i + 32].how = REG_SAVED_OFFSET; + fs->regs.reg[i + 32].loc.offset = (long) ®s->fpr[i] - new_cfa; + } + + /* If we have a VMX unit... */ + if (hwcap & 0x10000000) + { + struct gcc_vregs *vregs; +#ifdef __powerpc64__ + vregs = regs->vp; +#else + vregs = ®s->vregs; +#endif + if (regs->msr & (1 << 25)) + { + for (i = 0; i < 32; i++) + { + fs->regs.reg[i + R_VR0].how = REG_SAVED_OFFSET; + fs->regs.reg[i + R_VR0].loc.offset + = (long) &vregs->vr[i] - new_cfa; + } + + fs->regs.reg[R_VSCR].how = REG_SAVED_OFFSET; + fs->regs.reg[R_VSCR].loc.offset = (long) &vregs->vscr - new_cfa; + } + + fs->regs.reg[R_VRSAVE].how = REG_SAVED_OFFSET; + fs->regs.reg[R_VRSAVE].loc.offset = (long) &vregs->vsave - new_cfa; + } + + /* If we have SPE register high-parts... we check at compile-time to + avoid expanding the code for all other PowerPC. */ +#ifdef __SPE__ + for (i = 0; i < 32; i++) + { + fs->regs.reg[i + FIRST_PSEUDO_REGISTER - 1].how = REG_SAVED_OFFSET; + fs->regs.reg[i + FIRST_PSEUDO_REGISTER - 1].loc.offset + = (long) ®s->vregs - new_cfa + 4 * i; + } +#endif + + return _URC_NO_REASON; +} + +#define MD_FROB_UPDATE_CONTEXT frob_update_context + +static void +frob_update_context (struct _Unwind_Context *context, _Unwind_FrameState *fs ATTRIBUTE_UNUSED) +{ + const unsigned int *pc = (const unsigned int *) context->ra; + + /* Fix up for 2.6.12 - 2.6.16 Linux kernels that have vDSO, but don't + have S flag in it. 
*/ +#ifdef __powerpc64__ + /* addi r1, r1, 128; li r0, 0x0077; sc (sigreturn) */ + /* addi r1, r1, 128; li r0, 0x00AC; sc (rt_sigreturn) */ + if (pc[0] == 0x38210000 + SIGNAL_FRAMESIZE + && (pc[1] == 0x38000077 || pc[1] == 0x380000AC) + && pc[2] == 0x44000002) + _Unwind_SetSignalFrame (context, 1); +#else + /* li r0, 0x7777; sc (sigreturn old) */ + /* li r0, 0x0077; sc (sigreturn new) */ + /* li r0, 0x6666; sc (rt_sigreturn old) */ + /* li r0, 0x00AC; sc (rt_sigreturn new) */ + if ((pc[0] == 0x38007777 || pc[0] == 0x38000077 + || pc[0] == 0x38006666 || pc[0] == 0x380000AC) + && pc[1] == 0x44000002) + _Unwind_SetSignalFrame (context, 1); +#endif + +#ifdef __powerpc64__ + if (fs->regs.reg[2].how == REG_UNSAVED) + { + /* If the current unwind info (FS) does not contain explicit info + saving R2, then we have to do a minor amount of code reading to + figure out if it was saved. The big problem here is that the + code that does the save/restore is generated by the linker, so + we have no good way to determine at compile time what to do. */ + unsigned int *insn + = (unsigned int *) _Unwind_GetGR (context, R_LR); + if (insn && *insn == 0xE8410028) + _Unwind_SetGRPtr (context, 2, context->cfa + 40); + } +#endif +} diff --git a/gcc/config/rs6000/linux.h b/gcc/config/rs6000/linux.h new file mode 100644 index 000000000..77c8f6103 --- /dev/null +++ b/gcc/config/rs6000/linux.h @@ -0,0 +1,134 @@ +/* Definitions of target machine for GNU compiler, + for PowerPC machines running Linux. + Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, + 2004, 2005, 2006, 2007, 2010, 2011 Free Software Foundation, Inc. + Contributed by Michael Meissner (meissner@cygnus.com). + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. 
+ + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +/* Linux doesn't support saving and restoring 64-bit regs in a 32-bit + process. */ +#define OS_MISSING_POWERPC64 1 + +/* We use glibc _mcount for profiling. */ +#define NO_PROFILE_COUNTERS 1 + +#ifdef SINGLE_LIBC +#define OPTION_GLIBC (DEFAULT_LIBC == LIBC_GLIBC) +#else +#define OPTION_GLIBC (linux_libc == LIBC_GLIBC) +#endif + +/* glibc has float and long double forms of math functions. */ +#undef TARGET_C99_FUNCTIONS +#define TARGET_C99_FUNCTIONS (OPTION_GLIBC) + +/* Whether we have sincos that follows the GNU extension. */ +#undef TARGET_HAS_SINCOS +#define TARGET_HAS_SINCOS (OPTION_GLIBC) + +#undef TARGET_OS_CPP_BUILTINS +#define TARGET_OS_CPP_BUILTINS() \ + do \ + { \ + builtin_define_std ("PPC"); \ + builtin_define_std ("powerpc"); \ + builtin_assert ("cpu=powerpc"); \ + builtin_assert ("machine=powerpc"); \ + TARGET_OS_SYSV_CPP_BUILTINS (); \ + } \ + while (0) + +#undef CPP_OS_DEFAULT_SPEC +#define CPP_OS_DEFAULT_SPEC "%(cpp_os_linux)" + +/* The GNU C++ standard library currently requires _GNU_SOURCE being + defined on glibc-based systems. This temporary hack accomplishes this, + it should go away as soon as libstdc++-v3 has a real fix. 
*/ +#undef CPLUSPLUS_CPP_SPEC +#define CPLUSPLUS_CPP_SPEC "-D_GNU_SOURCE %(cpp)" + +#undef LINK_SHLIB_SPEC +#define LINK_SHLIB_SPEC "%{shared:-shared} %{!shared: %{static:-static}}" + +#undef LIB_DEFAULT_SPEC +#define LIB_DEFAULT_SPEC "%(lib_linux)" + +#undef STARTFILE_DEFAULT_SPEC +#define STARTFILE_DEFAULT_SPEC "%(startfile_linux)" + +#undef ENDFILE_DEFAULT_SPEC +#define ENDFILE_DEFAULT_SPEC "%(endfile_linux)" + +#undef LINK_START_DEFAULT_SPEC +#define LINK_START_DEFAULT_SPEC "%(link_start_linux)" + +#undef LINK_OS_DEFAULT_SPEC +#define LINK_OS_DEFAULT_SPEC "%(link_os_linux)" + +#define LINK_GCC_C_SEQUENCE_SPEC \ + "%{static:--start-group} %G %L %{static:--end-group}%{!static:%G}" + +/* Use --as-needed -lgcc_s for eh support. */ +#ifdef HAVE_LD_AS_NEEDED +#define USE_LD_AS_NEEDED 1 +#endif + +#undef TARGET_VERSION +#define TARGET_VERSION fprintf (stderr, " (PowerPC GNU/Linux)"); + +/* Override rs6000.h definition. */ +#undef ASM_APP_ON +#define ASM_APP_ON "#APP\n" + +/* Override rs6000.h definition. */ +#undef ASM_APP_OFF +#define ASM_APP_OFF "#NO_APP\n" + +/* For backward compatibility, we must continue to use the AIX + structure return convention. */ +#undef DRAFT_V4_STRUCT_RET +#define DRAFT_V4_STRUCT_RET 1 + +/* We are 32-bit all the time, so optimize a little. */ +#undef TARGET_64BIT +#define TARGET_64BIT 0 + +/* We don't need to generate entries in .fixup, except when + -mrelocatable or -mrelocatable-lib is given. */ +#undef RELOCATABLE_NEEDS_FIXUP +#define RELOCATABLE_NEEDS_FIXUP \ + (target_flags & target_flags_explicit & MASK_RELOCATABLE) + +#define TARGET_POSIX_IO + +#define MD_UNWIND_SUPPORT "config/rs6000/linux-unwind.h" + +#ifdef TARGET_LIBC_PROVIDES_SSP +/* ppc32 glibc provides __stack_chk_guard in -0x7008(2). */ +#define TARGET_THREAD_SSP_OFFSET -0x7008 +#endif + +#define POWERPC_LINUX + +/* ppc linux has 128-bit long double support in glibc 2.4 and later. 
*/ +#ifdef TARGET_DEFAULT_LONG_DOUBLE_128 +#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 128 +#endif + +/* Static stack checking is supported by means of probes. */ +#define STACK_CHECK_STATIC_BUILTIN 1 diff --git a/gcc/config/rs6000/linux64.h b/gcc/config/rs6000/linux64.h new file mode 100644 index 000000000..e6840d63e --- /dev/null +++ b/gcc/config/rs6000/linux64.h @@ -0,0 +1,569 @@ +/* Definitions of target machine for GNU compiler, + for 64 bit PowerPC linux. + Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, + 2009, 2010, 2011 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . 
*/ + +#ifndef RS6000_BI_ARCH + +#undef DEFAULT_ABI +#define DEFAULT_ABI ABI_AIX + +#undef TARGET_64BIT +#define TARGET_64BIT 1 + +#define DEFAULT_ARCH64_P 1 +#define RS6000_BI_ARCH_P 0 + +#else + +#define DEFAULT_ARCH64_P (TARGET_DEFAULT & MASK_64BIT) +#define RS6000_BI_ARCH_P 1 + +#endif + +#ifdef IN_LIBGCC2 +#undef TARGET_64BIT +#ifdef __powerpc64__ +#define TARGET_64BIT 1 +#else +#define TARGET_64BIT 0 +#endif +#endif + +#undef TARGET_AIX +#define TARGET_AIX TARGET_64BIT + +#ifdef HAVE_LD_NO_DOT_SYMS +/* New ABI uses a local sym for the function entry point. */ +extern int dot_symbols; +#undef DOT_SYMBOLS +#define DOT_SYMBOLS dot_symbols +#endif + +#define TARGET_PROFILE_KERNEL profile_kernel + +#define TARGET_USES_LINUX64_OPT 1 +#ifdef HAVE_LD_LARGE_TOC +#undef TARGET_CMODEL +#define TARGET_CMODEL rs6000_current_cmodel +#define SET_CMODEL(opt) rs6000_current_cmodel = opt +#else +#define SET_CMODEL(opt) do {} while (0) +#endif + +#undef PROCESSOR_DEFAULT +#define PROCESSOR_DEFAULT PROCESSOR_POWER7 +#undef PROCESSOR_DEFAULT64 +#define PROCESSOR_DEFAULT64 PROCESSOR_POWER7 + +/* We don't need to generate entries in .fixup, except when + -mrelocatable or -mrelocatable-lib is given. 
*/ +#undef RELOCATABLE_NEEDS_FIXUP +#define RELOCATABLE_NEEDS_FIXUP \ + (target_flags & target_flags_explicit & MASK_RELOCATABLE) + +#undef RS6000_ABI_NAME +#define RS6000_ABI_NAME "linux" + +#define INVALID_64BIT "-m%s not supported in this configuration" +#define INVALID_32BIT INVALID_64BIT + +#undef SUBSUBTARGET_OVERRIDE_OPTIONS +#define SUBSUBTARGET_OVERRIDE_OPTIONS \ + do \ + { \ + if (!rs6000_explicit_options.alignment) \ + rs6000_alignment_flags = MASK_ALIGN_NATURAL; \ + if (TARGET_64BIT) \ + { \ + if (DEFAULT_ABI != ABI_AIX) \ + { \ + rs6000_current_abi = ABI_AIX; \ + error (INVALID_64BIT, "call"); \ + } \ + dot_symbols = !strcmp (rs6000_abi_name, "aixdesc"); \ + if (target_flags & MASK_RELOCATABLE) \ + { \ + target_flags &= ~MASK_RELOCATABLE; \ + error (INVALID_64BIT, "relocatable"); \ + } \ + if (target_flags & MASK_EABI) \ + { \ + target_flags &= ~MASK_EABI; \ + error (INVALID_64BIT, "eabi"); \ + } \ + if (TARGET_PROTOTYPE) \ + { \ + target_prototype = 0; \ + error (INVALID_64BIT, "prototype"); \ + } \ + if ((target_flags & MASK_POWERPC64) == 0) \ + { \ + target_flags |= MASK_POWERPC64; \ + error ("-m64 requires a PowerPC64 cpu"); \ + } \ + if ((target_flags_explicit & MASK_MINIMAL_TOC) != 0) \ + { \ + if (rs6000_explicit_options.cmodel \ + && rs6000_current_cmodel != CMODEL_SMALL) \ + error ("-mcmodel incompatible with other toc options"); \ + SET_CMODEL (CMODEL_SMALL); \ + } \ + else \ + { \ + if (!rs6000_explicit_options.cmodel) \ + SET_CMODEL (CMODEL_MEDIUM); \ + if (rs6000_current_cmodel != CMODEL_SMALL) \ + { \ + TARGET_NO_FP_IN_TOC = 0; \ + TARGET_NO_SUM_IN_TOC = 0; \ + } \ + } \ + } \ + else \ + { \ + if (!RS6000_BI_ARCH_P) \ + error (INVALID_32BIT, "32"); \ + if (TARGET_PROFILE_KERNEL) \ + { \ + TARGET_PROFILE_KERNEL = 0; \ + error (INVALID_32BIT, "profile-kernel"); \ + } \ + if (rs6000_explicit_options.cmodel) \ + { \ + SET_CMODEL (CMODEL_SMALL); \ + error (INVALID_32BIT, "cmodel"); \ + } \ + } \ + } \ + while (0) + +#ifdef RS6000_BI_ARCH + 
+#undef OPTION_TARGET_CPU_DEFAULT +#define OPTION_TARGET_CPU_DEFAULT \ + (((TARGET_DEFAULT ^ target_flags) & MASK_64BIT) \ + ? (char *) 0 : TARGET_CPU_DEFAULT) + +#endif + +#undef ASM_DEFAULT_SPEC +#undef ASM_SPEC +#undef LINK_OS_LINUX_SPEC + +/* FIXME: This will quite possibly choose the wrong dynamic linker. */ +#undef LINK_OS_GNU_SPEC +#define LINK_OS_GNU_SPEC LINK_OS_LINUX_SPEC + +#ifndef RS6000_BI_ARCH +#define ASM_DEFAULT_SPEC "-mppc64" +#define ASM_SPEC "%(asm_spec64) %(asm_spec_common)" +#define LINK_OS_LINUX_SPEC "%(link_os_linux_spec64)" +#else +#if DEFAULT_ARCH64_P +#define ASM_DEFAULT_SPEC "-mppc%{!m32:64}" +#define ASM_SPEC "%{m32:%(asm_spec32)}%{!m32:%(asm_spec64)} %(asm_spec_common)" +#define LINK_OS_LINUX_SPEC "%{m32:%(link_os_linux_spec32)}%{!m32:%(link_os_linux_spec64)}" +#else +#define ASM_DEFAULT_SPEC "-mppc%{m64:64}" +#define ASM_SPEC "%{!m64:%(asm_spec32)}%{m64:%(asm_spec64)} %(asm_spec_common)" +#define LINK_OS_LINUX_SPEC "%{!m64:%(link_os_linux_spec32)}%{m64:%(link_os_linux_spec64)}" +#endif +#endif + +#define ASM_SPEC32 "-a32 \ +%{mrelocatable} %{mrelocatable-lib} %{fpic:-K PIC} %{fPIC:-K PIC} \ +%{memb} %{!memb: %{msdata=eabi: -memb}} \ +%{!mlittle: %{!mlittle-endian: %{!mbig: %{!mbig-endian: \ + %{mcall-freebsd: -mbig} \ + %{mcall-i960-old: -mlittle} \ + %{mcall-linux: -mbig} \ + %{mcall-gnu: -mbig} \ + %{mcall-netbsd: -mbig} \ +}}}}" + +#define ASM_SPEC64 "-a64" + +#define ASM_SPEC_COMMON "%(asm_cpu) \ +%{,assembler|,assembler-with-cpp: %{mregnames} %{mno-regnames}} \ +%{mlittle} %{mlittle-endian} %{mbig} %{mbig-endian}" + +#undef SUBSUBTARGET_EXTRA_SPECS +#define SUBSUBTARGET_EXTRA_SPECS \ + { "asm_spec_common", ASM_SPEC_COMMON }, \ + { "asm_spec32", ASM_SPEC32 }, \ + { "asm_spec64", ASM_SPEC64 }, \ + { "link_os_linux_spec32", LINK_OS_LINUX_SPEC32 }, \ + { "link_os_linux_spec64", LINK_OS_LINUX_SPEC64 }, + +#undef MULTILIB_DEFAULTS +#if DEFAULT_ARCH64_P +#define MULTILIB_DEFAULTS { "m64" } +#else +#define MULTILIB_DEFAULTS { "m32" } 
+#endif + +#ifndef RS6000_BI_ARCH + +/* 64-bit PowerPC Linux is always big-endian. */ +#undef TARGET_LITTLE_ENDIAN +#define TARGET_LITTLE_ENDIAN 0 + +/* 64-bit PowerPC Linux always has a TOC. */ +#undef TARGET_TOC +#define TARGET_TOC 1 + +/* Some things from sysv4.h we don't do when 64 bit. */ +#undef TARGET_RELOCATABLE +#define TARGET_RELOCATABLE 0 +#undef TARGET_EABI +#define TARGET_EABI 0 +#undef TARGET_PROTOTYPE +#define TARGET_PROTOTYPE 0 +#undef RELOCATABLE_NEEDS_FIXUP +#define RELOCATABLE_NEEDS_FIXUP 0 + +#endif + +/* We use glibc _mcount for profiling. */ +#define NO_PROFILE_COUNTERS 1 +#define PROFILE_HOOK(LABEL) \ + do { if (TARGET_64BIT) output_profile_hook (LABEL); } while (0) + +/* PowerPC64 Linux word-aligns FP doubles when -malign-power is given. */ +#undef ADJUST_FIELD_ALIGN +#define ADJUST_FIELD_ALIGN(FIELD, COMPUTED) \ + ((TARGET_ALTIVEC && TREE_CODE (TREE_TYPE (FIELD)) == VECTOR_TYPE) \ + ? 128 \ + : (TARGET_64BIT \ + && TARGET_ALIGN_NATURAL == 0 \ + && TYPE_MODE (strip_array_types (TREE_TYPE (FIELD))) == DFmode) \ + ? MIN ((COMPUTED), 32) \ + : (COMPUTED)) + +/* PowerPC64 Linux increases natural record alignment to doubleword if + the first field is an FP double, only if in power alignment mode. */ +#undef ROUND_TYPE_ALIGN +#define ROUND_TYPE_ALIGN(STRUCT, COMPUTED, SPECIFIED) \ + ((TARGET_64BIT \ + && (TREE_CODE (STRUCT) == RECORD_TYPE \ + || TREE_CODE (STRUCT) == UNION_TYPE \ + || TREE_CODE (STRUCT) == QUAL_UNION_TYPE) \ + && TARGET_ALIGN_NATURAL == 0) \ + ? rs6000_special_round_type_align (STRUCT, COMPUTED, SPECIFIED) \ + : MAX ((COMPUTED), (SPECIFIED))) + +/* Use the default for compiling target libs. */ +#ifdef IN_TARGET_LIBS +#undef TARGET_ALIGN_NATURAL +#define TARGET_ALIGN_NATURAL 1 +#endif + +/* Indicate that jump tables go in the text section. 
*/ +#undef JUMP_TABLES_IN_TEXT_SECTION +#define JUMP_TABLES_IN_TEXT_SECTION TARGET_64BIT + +/* The linux ppc64 ABI isn't explicit on whether aggregates smaller + than a doubleword should be padded upward or downward. You could + reasonably assume that they follow the normal rules for structure + layout treating the parameter area as any other block of memory, + then map the reg param area to registers. i.e. pad upward. + Setting both of the following defines results in this behavior. + Setting just the first one will result in aggregates that fit in a + doubleword being padded downward, and others being padded upward. + Not a bad idea as this results in struct { int x; } being passed + the same way as an int. */ +#define AGGREGATE_PADDING_FIXED TARGET_64BIT +#define AGGREGATES_PAD_UPWARD_ALWAYS 0 + +/* Specify padding for the last element of a block move between + registers and memory. FIRST is nonzero if this is the only + element. */ +#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \ + (!(FIRST) ? upward : FUNCTION_ARG_PADDING (MODE, TYPE)) + +/* Linux doesn't support saving and restoring 64-bit regs in a 32-bit + process. */ +#define OS_MISSING_POWERPC64 !TARGET_64BIT + +#ifdef SINGLE_LIBC +#define OPTION_GLIBC (DEFAULT_LIBC == LIBC_GLIBC) +#else +#define OPTION_GLIBC (linux_libc == LIBC_GLIBC) +#endif + +/* glibc has float and long double forms of math functions. */ +#undef TARGET_C99_FUNCTIONS +#define TARGET_C99_FUNCTIONS (OPTION_GLIBC) + +/* Whether we have sincos that follows the GNU extension. 
*/ +#undef TARGET_HAS_SINCOS +#define TARGET_HAS_SINCOS (OPTION_GLIBC) + +#undef TARGET_OS_CPP_BUILTINS +#define TARGET_OS_CPP_BUILTINS() \ + do \ + { \ + if (TARGET_64BIT) \ + { \ + builtin_define ("__PPC__"); \ + builtin_define ("__PPC64__"); \ + builtin_define ("__powerpc__"); \ + builtin_define ("__powerpc64__"); \ + builtin_assert ("cpu=powerpc64"); \ + builtin_assert ("machine=powerpc64"); \ + } \ + else \ + { \ + builtin_define_std ("PPC"); \ + builtin_define_std ("powerpc"); \ + builtin_assert ("cpu=powerpc"); \ + builtin_assert ("machine=powerpc"); \ + TARGET_OS_SYSV_CPP_BUILTINS (); \ + } \ + } \ + while (0) + +#undef CPP_OS_DEFAULT_SPEC +#define CPP_OS_DEFAULT_SPEC "%(cpp_os_linux)" + +/* The GNU C++ standard library currently requires _GNU_SOURCE being + defined on glibc-based systems. This temporary hack accomplishes this, + it should go away as soon as libstdc++-v3 has a real fix. */ +#undef CPLUSPLUS_CPP_SPEC +#define CPLUSPLUS_CPP_SPEC "-D_GNU_SOURCE %(cpp)" + +#undef LINK_SHLIB_SPEC +#define LINK_SHLIB_SPEC "%{shared:-shared} %{!shared: %{static:-static}}" + +#undef LIB_DEFAULT_SPEC +#define LIB_DEFAULT_SPEC "%(lib_linux)" + +#undef STARTFILE_DEFAULT_SPEC +#define STARTFILE_DEFAULT_SPEC "%(startfile_linux)" + +#undef ENDFILE_DEFAULT_SPEC +#define ENDFILE_DEFAULT_SPEC "%(endfile_linux)" + +#undef LINK_START_DEFAULT_SPEC +#define LINK_START_DEFAULT_SPEC "%(link_start_linux)" + +#undef LINK_OS_DEFAULT_SPEC +#define LINK_OS_DEFAULT_SPEC "%(link_os_linux)" + +#define GLIBC_DYNAMIC_LINKER32 "/lib/ld.so.1" +#define GLIBC_DYNAMIC_LINKER64 "/lib64/ld64.so.1" +#define UCLIBC_DYNAMIC_LINKER32 "/lib/ld-uClibc.so.0" +#define UCLIBC_DYNAMIC_LINKER64 "/lib/ld64-uClibc.so.0" +#if DEFAULT_LIBC == LIBC_UCLIBC +#define CHOOSE_DYNAMIC_LINKER(G, U) "%{mglibc:" G ";:" U "}" +#elif DEFAULT_LIBC == LIBC_GLIBC +#define CHOOSE_DYNAMIC_LINKER(G, U) "%{muclibc:" U ";:" G "}" +#else +#error "Unsupported DEFAULT_LIBC" +#endif +#define LINUX_DYNAMIC_LINKER32 \ + 
CHOOSE_DYNAMIC_LINKER (GLIBC_DYNAMIC_LINKER32, UCLIBC_DYNAMIC_LINKER32) +#define LINUX_DYNAMIC_LINKER64 \ + CHOOSE_DYNAMIC_LINKER (GLIBC_DYNAMIC_LINKER64, UCLIBC_DYNAMIC_LINKER64) + + +#define LINK_OS_LINUX_SPEC32 "-m elf32ppclinux %{!shared: %{!static: \ + %{rdynamic:-export-dynamic} \ + -dynamic-linker " LINUX_DYNAMIC_LINKER32 "}}" + +#define LINK_OS_LINUX_SPEC64 "-m elf64ppc %{!shared: %{!static: \ + %{rdynamic:-export-dynamic} \ + -dynamic-linker " LINUX_DYNAMIC_LINKER64 "}}" + +#undef TOC_SECTION_ASM_OP +#define TOC_SECTION_ASM_OP \ + (TARGET_64BIT \ + ? "\t.section\t\".toc\",\"aw\"" \ + : "\t.section\t\".got\",\"aw\"") + +#undef MINIMAL_TOC_SECTION_ASM_OP +#define MINIMAL_TOC_SECTION_ASM_OP \ + (TARGET_64BIT \ + ? "\t.section\t\".toc1\",\"aw\"" \ + : ((TARGET_RELOCATABLE || flag_pic) \ + ? "\t.section\t\".got2\",\"aw\"" \ + : "\t.section\t\".got1\",\"aw\"")) + +#undef TARGET_VERSION +#define TARGET_VERSION fprintf (stderr, " (PowerPC64 GNU/Linux)"); + +/* Must be at least as big as our pointer type. */ +#undef SIZE_TYPE +#define SIZE_TYPE (TARGET_64BIT ? "long unsigned int" : "unsigned int") + +#undef PTRDIFF_TYPE +#define PTRDIFF_TYPE (TARGET_64BIT ? "long int" : "int") + +#undef WCHAR_TYPE +#define WCHAR_TYPE (TARGET_64BIT ? "int" : "long int") +#undef WCHAR_TYPE_SIZE +#define WCHAR_TYPE_SIZE 32 + +/* Override rs6000.h definition. */ +#undef ASM_APP_ON +#define ASM_APP_ON "#APP\n" + +/* Override rs6000.h definition. */ +#undef ASM_APP_OFF +#define ASM_APP_OFF "#NO_APP\n" + +/* PowerPC no-op instruction. */ +#undef RS6000_CALL_GLUE +#define RS6000_CALL_GLUE (TARGET_64BIT ? "nop" : "cror 31,31,31") + +#undef RS6000_MCOUNT +#define RS6000_MCOUNT "_mcount" + +#ifdef __powerpc64__ +/* _init and _fini functions are built from bits spread across many + object files, each potentially with a different TOC pointer. For + that reason, place a nop after the call so that the linker can + restore the TOC pointer if a TOC adjusting call stub is needed. 
*/ +#if DOT_SYMBOLS +#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \ + asm (SECTION_OP "\n" \ +" bl ." #FUNC "\n" \ +" nop\n" \ +" .previous"); +#else +#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \ + asm (SECTION_OP "\n" \ +" bl " #FUNC "\n" \ +" nop\n" \ +" .previous"); +#endif +#endif + +/* FP save and restore routines. */ +#undef SAVE_FP_PREFIX +#define SAVE_FP_PREFIX (TARGET_64BIT ? "._savef" : "_savefpr_") +#undef SAVE_FP_SUFFIX +#define SAVE_FP_SUFFIX "" +#undef RESTORE_FP_PREFIX +#define RESTORE_FP_PREFIX (TARGET_64BIT ? "._restf" : "_restfpr_") +#undef RESTORE_FP_SUFFIX +#define RESTORE_FP_SUFFIX "" + +/* Dwarf2 debugging. */ +#undef PREFERRED_DEBUGGING_TYPE +#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG + +/* This is how to declare the size of a function. */ +#undef ASM_DECLARE_FUNCTION_SIZE +#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \ + do \ + { \ + if (!flag_inhibit_size_directive) \ + { \ + fputs ("\t.size\t", (FILE)); \ + if (TARGET_64BIT && DOT_SYMBOLS) \ + putc ('.', (FILE)); \ + assemble_name ((FILE), (FNAME)); \ + fputs (",.-", (FILE)); \ + rs6000_output_function_entry (FILE, FNAME); \ + putc ('\n', (FILE)); \ + } \ + } \ + while (0) + +/* Return nonzero if this entry is to be written into the constant + pool in a special way. We do so if this is a SYMBOL_REF, LABEL_REF + or a CONST containing one of them. If -mfp-in-toc (the default), + we also do this for floating-point constants. We actually can only + do this if the FP formats of the target and host machines are the + same, but we can't check that since not every file that uses + the macros includes real.h. We also do this when we can write the + entry into the TOC and the entry is not larger than a TOC entry. 
*/ + +#undef ASM_OUTPUT_SPECIAL_POOL_ENTRY_P +#define ASM_OUTPUT_SPECIAL_POOL_ENTRY_P(X, MODE) \ + (TARGET_TOC \ + && (GET_CODE (X) == SYMBOL_REF \ + || (GET_CODE (X) == CONST && GET_CODE (XEXP (X, 0)) == PLUS \ + && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF) \ + || GET_CODE (X) == LABEL_REF \ + || (GET_CODE (X) == CONST_INT \ + && GET_MODE_BITSIZE (MODE) <= GET_MODE_BITSIZE (Pmode)) \ + || (GET_CODE (X) == CONST_DOUBLE \ + && ((TARGET_64BIT \ + && (TARGET_MINIMAL_TOC \ + || (SCALAR_FLOAT_MODE_P (GET_MODE (X)) \ + && ! TARGET_NO_FP_IN_TOC))) \ + || (!TARGET_64BIT \ + && !TARGET_NO_FP_IN_TOC \ + && !TARGET_RELOCATABLE \ + && SCALAR_FLOAT_MODE_P (GET_MODE (X)) \ + && BITS_PER_WORD == HOST_BITS_PER_INT))))) + +/* Select a format to encode pointers in exception handling data. CODE + is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is + true if the symbol may be affected by dynamic relocations. */ +#undef ASM_PREFERRED_EH_DATA_FORMAT +#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \ + ((TARGET_64BIT || flag_pic || TARGET_RELOCATABLE) \ + ? (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel \ + | (TARGET_64BIT ? DW_EH_PE_udata8 : DW_EH_PE_sdata4)) \ + : DW_EH_PE_absptr) + +/* For backward compatibility, we must continue to use the AIX + structure return convention. */ +#undef DRAFT_V4_STRUCT_RET +#define DRAFT_V4_STRUCT_RET (!TARGET_64BIT) + +#define TARGET_POSIX_IO + +#define LINK_GCC_C_SEQUENCE_SPEC \ + "%{static:--start-group} %G %L %{static:--end-group}%{!static:%G}" + +/* Use --as-needed -lgcc_s for eh support. */ +#ifdef HAVE_LD_AS_NEEDED +#define USE_LD_AS_NEEDED 1 +#endif + +#define MD_UNWIND_SUPPORT "config/rs6000/linux-unwind.h" + +#ifdef TARGET_LIBC_PROVIDES_SSP +/* ppc32 glibc provides __stack_chk_guard in -0x7008(2), + ppc64 glibc provides it at -0x7010(13). */ +#define TARGET_THREAD_SSP_OFFSET (TARGET_64BIT ? 
-0x7010 : -0x7008) +#endif + +#define POWERPC_LINUX + +/* ppc{32,64} linux has 128-bit long double support in glibc 2.4 and later. */ +#ifdef TARGET_DEFAULT_LONG_DOUBLE_128 +#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 128 +#endif + +/* Static stack checking is supported by means of probes. */ +#define STACK_CHECK_STATIC_BUILTIN 1 + +/* The default value isn't sufficient in 64-bit mode. */ +#define STACK_CHECK_PROTECT (TARGET_64BIT ? 16 * 1024 : 12 * 1024) diff --git a/gcc/config/rs6000/linux64.opt b/gcc/config/rs6000/linux64.opt new file mode 100644 index 000000000..28177f1ed --- /dev/null +++ b/gcc/config/rs6000/linux64.opt @@ -0,0 +1,28 @@ +; Options for 64-bit PowerPC Linux. +; +; Copyright (C) 2005, 2007, 2009, 2010 Free Software Foundation, Inc. +; Contributed by Aldy Hernandez . +; +; This file is part of GCC. +; +; GCC is free software; you can redistribute it and/or modify it under +; the terms of the GNU General Public License as published by the Free +; Software Foundation; either version 3, or (at your option) any later +; version. +; +; GCC is distributed in the hope that it will be useful, but WITHOUT +; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +; License for more details. +; +; You should have received a copy of the GNU General Public License +; along with GCC; see the file COPYING3. If not see +; . + +mprofile-kernel +Target Report Var(profile_kernel) Save +Call mcount for profiling before a function prologue + +mcmodel= +Target RejectNegative Joined +Select code model diff --git a/gcc/config/rs6000/linuxaltivec.h b/gcc/config/rs6000/linuxaltivec.h new file mode 100644 index 000000000..a6e1523ea --- /dev/null +++ b/gcc/config/rs6000/linuxaltivec.h @@ -0,0 +1,30 @@ +/* Definitions of target machine for GNU compiler, + for AltiVec enhanced PowerPC machines running GNU/Linux. + Copyright (C) 2001, 2003, 2007 Free Software Foundation, Inc. 
+ Contributed by Aldy Hernandez (aldyh@redhat.com). + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +#undef TARGET_VERSION +#define TARGET_VERSION fprintf (stderr, " (PowerPC AltiVec GNU/Linux)"); + +/* Override rs6000.h and sysv4.h definition. */ +#undef TARGET_DEFAULT +#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS | MASK_ALTIVEC) + +#undef SUBSUBTARGET_OVERRIDE_OPTIONS +#define SUBSUBTARGET_OVERRIDE_OPTIONS rs6000_altivec_abi = 1 diff --git a/gcc/config/rs6000/linuxspe.h b/gcc/config/rs6000/linuxspe.h new file mode 100644 index 000000000..3cef9d9dc --- /dev/null +++ b/gcc/config/rs6000/linuxspe.h @@ -0,0 +1,44 @@ +/* Definitions of target machine for GNU compiler, + for PowerPC e500 machines running GNU/Linux. + Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008 + Free Software Foundation, Inc. + Contributed by Aldy Hernandez (aldy@quesejoda.com). + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. 
+ + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +#undef TARGET_VERSION +#define TARGET_VERSION fprintf (stderr, " (PowerPC E500 GNU/Linux)"); + +/* Override rs6000.h and sysv4.h definition. */ +#undef TARGET_DEFAULT +#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS | MASK_STRICT_ALIGN) + +#undef SUBSUBTARGET_OVERRIDE_OPTIONS +#define SUBSUBTARGET_OVERRIDE_OPTIONS \ + if (rs6000_select[1].string == NULL) \ + rs6000_cpu = PROCESSOR_PPC8540; \ + if (!rs6000_explicit_options.spe_abi) \ + rs6000_spe_abi = 1; \ + if (!rs6000_explicit_options.float_gprs) \ + rs6000_float_gprs = 1; \ + if (!rs6000_explicit_options.spe) \ + rs6000_spe = 1; \ + if (target_flags & MASK_64BIT) \ + error ("-m64 not supported in this configuration") + +#undef ASM_DEFAULT_SPEC +#define ASM_DEFAULT_SPEC "-mppc -mspe -me500" diff --git a/gcc/config/rs6000/lynx.h b/gcc/config/rs6000/lynx.h new file mode 100644 index 000000000..1e923ea38 --- /dev/null +++ b/gcc/config/rs6000/lynx.h @@ -0,0 +1,125 @@ +/* Definitions for Rs6000 running LynxOS. + Copyright (C) 1995, 1996, 2000, 2002, 2003, 2004, 2005, 2007, 2010 + Free Software Foundation, Inc. + Contributed by David Henkel-Wallace, Cygnus Support (gumby@cygnus.com) + Rewritten by Adam Nemet, LynuxWorks Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +/* Override the definition in sysv4.h. 
*/ + +#undef TARGET_VERSION +#define TARGET_VERSION fputs (" (PowerPC/LynxOS)", stderr); + +/* Undefine the definition to enable the LynxOS default from the + top-level lynx.h. */ + +#undef SUBTARGET_EXTRA_SPECS + +/* Get rid off the spec definitions from rs6000/sysv4.h. */ + +#undef CPP_SPEC +#define CPP_SPEC \ +"%{msoft-float: -D_SOFT_FLOAT} \ + %(cpp_cpu) \ + %(cpp_os_lynx)" + +/* LynxOS only supports big-endian on PPC so we override the + definition from sysv4.h. Since the LynxOS 4.0 compiler was set to + return every structure in memory regardless of their size we have + to emulate the same behavior here with disabling the SVR4 structure + returning. */ + +#undef CC1_SPEC +#define CC1_SPEC \ +"%{G*} %{mno-sdata:-msdata=none} \ + %{maltivec:-mabi=altivec} \ + -maix-struct-return" + +#undef ASM_SPEC +#define ASM_SPEC \ +"%(asm_cpu) \ + %{,assembler|,assembler-with-cpp: %{mregnames} %{mno-regnames}}" + +#undef STARTFILE_SPEC +#undef ENDFILE_SPEC +#undef LIB_SPEC +#undef LINK_SPEC +#define LINK_SPEC \ +"%{!msdata=none:%{G*}} %{msdata=none:-G0} \ + %(link_os_lynx)" + +/* Override the definition from sysv4.h. */ + +#undef TARGET_OS_CPP_BUILTINS +#define TARGET_OS_CPP_BUILTINS() \ + do \ + { \ + builtin_define ("__BIG_ENDIAN__"); \ + builtin_define ("__powerpc__"); \ + builtin_assert ("cpu=powerpc"); \ + builtin_assert ("machine=powerpc"); \ + builtin_define ("__PPC__"); \ + } \ + while (0) + +/* Override the rs6000.h definition. */ + +#undef ASM_APP_ON +#define ASM_APP_ON "#APP\n" + +/* Override the rs6000.h definition. */ + +#undef ASM_APP_OFF +#define ASM_APP_OFF "#NO_APP\n" + +/* LynxOS does not do anything with .fixup plus let's not create + writable section for linkonce.r and linkonce.t. */ + +#undef RELOCATABLE_NEEDS_FIXUP + +/* Override these from rs6000.h with the generic definition. */ + +#undef SIZE_TYPE +#undef ASM_OUTPUT_ALIGN +#undef PREFERRED_DEBUGGING_TYPE + +/* The file rs6000.c defines TARGET_HAVE_TLS unconditionally to the + value of HAVE_AS_TLS. 
HAVE_AS_TLS is true as gas support for TLS + is detected by configure. Override the definition to false. */ + +#undef HAVE_AS_TLS +#define HAVE_AS_TLS 0 + +#define DBX_REGISTER_NUMBER(REGNO) rs6000_dbx_register_number (REGNO) + +#ifdef CRT_BEGIN +/* This function is part of crtbegin*.o which is at the beginning of + the link and is called from .fini which is usually toward the end + of the executable. Make it longcall so that we don't limit the + text size of the executables to 32M. */ + +static void __do_global_dtors_aux (void) __attribute__ ((longcall)); +#endif /* CRT_BEGIN */ + +#ifdef CRT_END +/* Similarly here. This function resides in crtend*.o which is toward + to end of the link and is called from .init which is at the + beginning. */ + +static void __do_global_ctors_aux (void) __attribute__ ((longcall)); +#endif /* CRT_END */ diff --git a/gcc/config/rs6000/milli.exp b/gcc/config/rs6000/milli.exp new file mode 100644 index 000000000..ea3a2b757 --- /dev/null +++ b/gcc/config/rs6000/milli.exp @@ -0,0 +1,7 @@ +#! +__mulh 0x3100 +__mull 0x3180 +__divss 0x3200 +__divus 0x3280 +__quoss 0x3300 +__quous 0x3380 diff --git a/gcc/config/rs6000/mpc.md b/gcc/config/rs6000/mpc.md new file mode 100644 index 000000000..415c68872 --- /dev/null +++ b/gcc/config/rs6000/mpc.md @@ -0,0 +1,111 @@ +;; Scheduling description for Motorola PowerPC processor cores. +;; Copyright (C) 2003, 2004, 2007, 2009 Free Software Foundation, Inc. +;; +;; This file is part of GCC. +;; +;; GCC is free software; you can redistribute it and/or modify it +;; under the terms of the GNU General Public License as published +;; by the Free Software Foundation; either version 3, or (at your +;; option) any later version. +;; +;; GCC is distributed in the hope that it will be useful, but WITHOUT +;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +;; License for more details. 
+;; +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . + +(define_automaton "mpc,mpcfp") +(define_cpu_unit "iu_mpc,mciu_mpc" "mpc") +(define_cpu_unit "fpu_mpc" "mpcfp") +(define_cpu_unit "lsu_mpc,bpu_mpc" "mpc") + +;; MPCCORE 32-bit SCIU, MCIU, LSU, FPU, BPU +;; 505/801/821/823 + +(define_insn_reservation "mpccore-load" 2 + (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\ + load_l,store_c,sync") + (eq_attr "cpu" "mpccore")) + "lsu_mpc") + +(define_insn_reservation "mpccore-store" 2 + (and (eq_attr "type" "store,store_ux,store_u,fpstore,fpstore_ux,fpstore_u") + (eq_attr "cpu" "mpccore")) + "lsu_mpc") + +(define_insn_reservation "mpccore-fpload" 2 + (and (eq_attr "type" "fpload,fpload_ux,fpload_u") + (eq_attr "cpu" "mpccore")) + "lsu_mpc") + +(define_insn_reservation "mpccore-integer" 1 + (and (eq_attr "type" "integer,insert_word,insert_dword,shift,trap,\ + var_shift_rotate,cntlz,exts,isel") + (eq_attr "cpu" "mpccore")) + "iu_mpc") + +(define_insn_reservation "mpccore-two" 1 + (and (eq_attr "type" "two") + (eq_attr "cpu" "mpccore")) + "iu_mpc,iu_mpc") + +(define_insn_reservation "mpccore-three" 1 + (and (eq_attr "type" "three") + (eq_attr "cpu" "mpccore")) + "iu_mpc,iu_mpc,iu_mpc") + +(define_insn_reservation "mpccore-imul" 2 + (and (eq_attr "type" "imul,imul2,imul3,imul_compare") + (eq_attr "cpu" "mpccore")) + "mciu_mpc") + +; Divide latency varies greatly from 2-11, use 6 as average +(define_insn_reservation "mpccore-idiv" 6 + (and (eq_attr "type" "idiv") + (eq_attr "cpu" "mpccore")) + "mciu_mpc*6") + +(define_insn_reservation "mpccore-compare" 3 + (and (eq_attr "type" "cmp,fast_compare,compare,delayed_compare,\ + var_delayed_compare") + (eq_attr "cpu" "mpccore")) + "iu_mpc,nothing,bpu_mpc") + +(define_insn_reservation "mpccore-fpcompare" 2 + (and (eq_attr "type" "fpcompare") + (eq_attr "cpu" "mpccore")) + "fpu_mpc,bpu_mpc") + +(define_insn_reservation 
"mpccore-fp" 4 + (and (eq_attr "type" "fp") + (eq_attr "cpu" "mpccore")) + "fpu_mpc*2") + +(define_insn_reservation "mpccore-dmul" 5 + (and (eq_attr "type" "dmul") + (eq_attr "cpu" "mpccore")) + "fpu_mpc*5") + +(define_insn_reservation "mpccore-sdiv" 10 + (and (eq_attr "type" "sdiv") + (eq_attr "cpu" "mpccore")) + "fpu_mpc*10") + +(define_insn_reservation "mpccore-ddiv" 17 + (and (eq_attr "type" "ddiv") + (eq_attr "cpu" "mpccore")) + "fpu_mpc*17") + +(define_insn_reservation "mpccore-mtjmpr" 4 + (and (eq_attr "type" "mtjmpr,mfjmpr") + (eq_attr "cpu" "mpccore")) + "bpu_mpc") + +(define_insn_reservation "mpccore-jmpreg" 1 + (and (eq_attr "type" "jmpreg,branch,cr_logical,delayed_cr,mfcr,mtcr,isync") + (eq_attr "cpu" "mpccore")) + "bpu_mpc") + diff --git a/gcc/config/rs6000/netbsd.h b/gcc/config/rs6000/netbsd.h new file mode 100644 index 000000000..de16b3722 --- /dev/null +++ b/gcc/config/rs6000/netbsd.h @@ -0,0 +1,93 @@ +/* Definitions of target machine for GNU compiler, + for PowerPC NetBSD systems. + Copyright 2002, 2003, 2007, 2008, 2010 Free Software Foundation, Inc. + Contributed by Wasabi Systems, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +#undef TARGET_OS_CPP_BUILTINS /* FIXME: sysv4.h should not define this! 
*/ +#define TARGET_OS_CPP_BUILTINS() \ + do \ + { \ + NETBSD_OS_CPP_BUILTINS_ELF(); \ + builtin_define ("__powerpc__"); \ + builtin_assert ("cpu=powerpc"); \ + builtin_assert ("machine=powerpc"); \ + } \ + while (0) + +/* Override the default from rs6000.h to avoid conflicts with macros + defined in NetBSD header files. */ + +#undef RS6000_CPU_CPP_ENDIAN_BUILTINS +#define RS6000_CPU_CPP_ENDIAN_BUILTINS() \ + do \ + { \ + if (BYTES_BIG_ENDIAN) \ + { \ + builtin_define ("__BIG_ENDIAN__"); \ + builtin_assert ("machine=bigendian"); \ + } \ + else \ + { \ + builtin_define ("__LITTLE_ENDIAN__"); \ + builtin_assert ("machine=littleendian"); \ + } \ + } \ + while (0) + +/* Make GCC agree with . */ + +#undef SIZE_TYPE +#define SIZE_TYPE "unsigned int" + +#undef PTRDIFF_TYPE +#define PTRDIFF_TYPE "int" + +/* Undo the spec mess from sysv4.h, and just define the specs + the way NetBSD systems actually expect. */ + +#undef CPP_SPEC +#define CPP_SPEC NETBSD_CPP_SPEC + +#undef LINK_SPEC +#define LINK_SPEC \ + "%{!msdata=none:%{G*}} %{msdata=none:-G0} \ + %(netbsd_link_spec)" + +#define NETBSD_ENTRY_POINT "_start" + +#undef STARTFILE_SPEC +#define STARTFILE_SPEC NETBSD_STARTFILE_SPEC + +#undef ENDFILE_SPEC +#define ENDFILE_SPEC "%(netbsd_endfile_spec)" + +#undef LIB_SPEC +#define LIB_SPEC NETBSD_LIB_SPEC + +#undef SUBTARGET_EXTRA_SPECS +#define SUBTARGET_EXTRA_SPECS \ + { "netbsd_link_spec", NETBSD_LINK_SPEC_ELF }, \ + { "netbsd_entry_point", NETBSD_ENTRY_POINT }, \ + { "netbsd_endfile_spec", NETBSD_ENDFILE_SPEC }, + + +#undef TARGET_VERSION +#define TARGET_VERSION fprintf (stderr, " (NetBSD/powerpc ELF)"); + +#define DBX_REGISTER_NUMBER(REGNO) rs6000_dbx_register_number (REGNO) diff --git a/gcc/config/rs6000/option-defaults.h b/gcc/config/rs6000/option-defaults.h new file mode 100644 index 000000000..0ecbe75c0 --- /dev/null +++ b/gcc/config/rs6000/option-defaults.h @@ -0,0 +1,64 @@ +/* Definitions of default options for config/rs6000 configurations. 
+ Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, + 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 + Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This header needs to be included after any other headers affecting + TARGET_DEFAULT. */ + +#if TARGET_AIX_OS +#define OPT_64 "maix64" +#define OPT_32 "maix32" +#else +#define OPT_64 "m64" +#define OPT_32 "m32" +#endif + +#ifndef MASK_64BIT +#define MASK_64BIT 0 +#endif + +#if TARGET_DEFAULT & MASK_64BIT +#define OPT_ARCH64 "!"OPT_32 +#define OPT_ARCH32 OPT_32 +#else +#define OPT_ARCH64 OPT_64 +#define OPT_ARCH32 "!"OPT_64 +#endif + +/* Support for a compile-time default CPU, et cetera. The rules are: + --with-cpu is ignored if -mcpu is specified; likewise --with-cpu-32 + and --with-cpu-64. + --with-tune is ignored if -mtune or -mcpu is specified; likewise + --with-tune-32 and --with-tune-64. + --with-float is ignored if -mhard-float or -msoft-float are + specified. 
*/ +#define OPTION_DEFAULT_SPECS \ + {"tune", "%{!mtune=*:%{!mcpu=*:-mtune=%(VALUE)}}" }, \ + {"tune_32", "%{" OPT_ARCH32 ":%{!mtune=*:%{!mcpu=*:-mtune=%(VALUE)}}}" }, \ + {"tune_64", "%{" OPT_ARCH64 ":%{!mtune=*:%{!mcpu=*:-mtune=%(VALUE)}}}" }, \ + {"cpu", "%{!mcpu=*:-mcpu=%(VALUE)}" }, \ + {"cpu_32", "%{" OPT_ARCH32 ":%{!mcpu=*:-mcpu=%(VALUE)}}" }, \ + {"cpu_64", "%{" OPT_ARCH64 ":%{!mcpu=*:-mcpu=%(VALUE)}}" }, \ + {"float", "%{!msoft-float:%{!mhard-float:-m%(VALUE)-float}}" } diff --git a/gcc/config/rs6000/paired.h b/gcc/config/rs6000/paired.h new file mode 100644 index 000000000..57c6ca4fd --- /dev/null +++ b/gcc/config/rs6000/paired.h @@ -0,0 +1,75 @@ +/* PowerPC 750CL user include file. + Copyright (C) 2007, 2009 Free Software Foundation, Inc. + Contributed by Revital Eres (eres@il.ibm.com). + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . 
*/ + +#ifndef _PAIRED_H +#define _PAIRED_H + +#define vector __attribute__((vector_size(8))) + +#define paired_msub __builtin_paired_msub +#define paired_madd __builtin_paired_madd +#define paired_nmsub __builtin_paired_nmsub +#define paired_nmadd __builtin_paired_nmadd +#define paired_sum0 __builtin_paired_sum0 +#define paired_sum1 __builtin_paired_sum1 +#define paired_div __builtin_paired_divv2sf3 +#define paired_add __builtin_paired_addv2sf3 +#define paired_sub __builtin_paired_subv2sf3 +#define paired_mul __builtin_paired_mulv2sf3 +#define paired_muls0 __builtin_paired_muls0 +#define paired_muls1 __builtin_paired_muls1 +#define paired_madds0 __builtin_paired_madds0 +#define paired_madds1 __builtin_paired_madds1 +#define paired_merge00 __builtin_paired_merge00 +#define paired_merge01 __builtin_paired_merge01 +#define paired_merge10 __builtin_paired_merge10 +#define paired_merge11 __builtin_paired_merge11 +#define paired_abs __builtin_paired_absv2sf2 +#define paired_nabs __builtin_paired_nabsv2sf2 +#define paired_neg __builtin_paired_negv2sf2 +#define paired_sqrt __builtin_paired_sqrtv2sf2 +#define paired_res __builtin_paired_resv2sf2 +#define paired_stx __builtin_paired_stx +#define paired_lx __builtin_paired_lx +#define paired_cmpu0 __builtin_paired_cmpu0 +#define paired_cmpu1 __builtin_paired_cmpu1 +#define paired_sel __builtin_paired_selv2sf4 + +/* Condition register codes for Paired predicates. 
*/ +#define LT 0 +#define GT 1 +#define EQ 2 +#define UN 3 + +#define paired_cmpu0_un(a,b) __builtin_paired_cmpu0 (UN, (a), (b)) +#define paired_cmpu0_eq(a,b) __builtin_paired_cmpu0 (EQ, (a), (b)) +#define paired_cmpu0_lt(a,b) __builtin_paired_cmpu0 (LT, (a), (b)) +#define paired_cmpu0_gt(a,b) __builtin_paired_cmpu0 (GT, (a), (b)) +#define paired_cmpu1_un(a,b) __builtin_paired_cmpu1 (UN, (a), (b)) +#define paired_cmpu1_eq(a,b) __builtin_paired_cmpu1 (EQ, (a), (b)) +#define paired_cmpu1_lt(a,b) __builtin_paired_cmpu1 (LT, (a), (b)) +#define paired_cmpu1_gt(a,b) __builtin_paired_cmpu1 (GT, (a), (b)) + +#endif /* _PAIRED_H */ diff --git a/gcc/config/rs6000/paired.md b/gcc/config/rs6000/paired.md new file mode 100644 index 000000000..0533f0097 --- /dev/null +++ b/gcc/config/rs6000/paired.md @@ -0,0 +1,527 @@ +;; PowerPC paired single and double hummer description +;; Copyright (C) 2007, 2009, 2010 +;; Free Software Foundation, Inc. +;; Contributed by David Edelsohn and Revital Eres +;; + +;; This file is part of GCC. + +;; GCC is free software; you can redistribute it and/or modify it +;; under the terms of the GNU General Public License as published +;; by the Free Software Foundation; either version 3, or (at your +;; option) any later version. + +;; GCC is distributed in the hope that it will be useful, but WITHOUT +;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +;; License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with this program; see the file COPYING3. If not see +;; . 
+ +(define_constants +[(UNSPEC_INTERHI_V2SF 330) + (UNSPEC_INTERLO_V2SF 331) + (UNSPEC_EXTEVEN_V2SF 332) + (UNSPEC_EXTODD_V2SF 333) +]) + +(define_insn "paired_negv2sf2" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (neg:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f")))] + "TARGET_PAIRED_FLOAT" + "ps_neg %0,%1" + [(set_attr "type" "fp")]) + +(define_insn "sqrtv2sf2" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (sqrt:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f")))] + "TARGET_PAIRED_FLOAT" + "ps_rsqrte %0,%1" + [(set_attr "type" "fp")]) + +(define_insn "paired_absv2sf2" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (abs:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f")))] + "TARGET_PAIRED_FLOAT" + "ps_abs %0,%1" + [(set_attr "type" "fp")]) + +(define_insn "nabsv2sf2" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (neg:V2SF (abs:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f"))))] + "TARGET_PAIRED_FLOAT" + "ps_nabs %0,%1" + [(set_attr "type" "fp")]) + +(define_insn "paired_addv2sf3" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (plus:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "%f") + (match_operand:V2SF 2 "gpc_reg_operand" "f")))] + "TARGET_PAIRED_FLOAT" + "ps_add %0,%1,%2" + [(set_attr "type" "fp")]) + +(define_insn "paired_subv2sf3" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (minus:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f") + (match_operand:V2SF 2 "gpc_reg_operand" "f")))] + "TARGET_PAIRED_FLOAT" + "ps_sub %0,%1,%2" + [(set_attr "type" "fp")]) + +(define_insn "paired_mulv2sf3" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (mult:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "%f") + (match_operand:V2SF 2 "gpc_reg_operand" "f")))] + "TARGET_PAIRED_FLOAT" + "ps_mul %0,%1,%2" + [(set_attr "type" "fp")]) + +(define_insn "resv2sf2" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (unspec:V2SF [(match_operand:V2SF 1 "gpc_reg_operand" "f")] UNSPEC_FRES))] + "TARGET_PAIRED_FLOAT && 
flag_finite_math_only" + "ps_res %0,%1" + [(set_attr "type" "fp")]) + +(define_insn "paired_divv2sf3" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (div:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f") + (match_operand:V2SF 2 "gpc_reg_operand" "f")))] + "TARGET_PAIRED_FLOAT" + "ps_div %0,%1,%2" + [(set_attr "type" "sdiv")]) + +(define_insn "paired_madds0" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (vec_concat:V2SF + (fma:SF + (vec_select:SF (match_operand:V2SF 1 "gpc_reg_operand" "f") + (parallel [(const_int 0)])) + (vec_select:SF (match_operand:V2SF 2 "gpc_reg_operand" "f") + (parallel [(const_int 0)])) + (vec_select:SF (match_operand:V2SF 3 "gpc_reg_operand" "f") + (parallel [(const_int 0)]))) + (fma:SF + (vec_select:SF (match_dup 1) + (parallel [(const_int 1)])) + (vec_select:SF (match_dup 2) + (parallel [(const_int 0)])) + (vec_select:SF (match_dup 3) + (parallel [(const_int 1)])))))] + "TARGET_PAIRED_FLOAT" + "ps_madds0 %0,%1,%2,%3" + [(set_attr "type" "fp")]) + +(define_insn "paired_madds1" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (vec_concat:V2SF + (fma:SF + (vec_select:SF (match_operand:V2SF 1 "gpc_reg_operand" "f") + (parallel [(const_int 0)])) + (vec_select:SF (match_operand:V2SF 2 "gpc_reg_operand" "f") + (parallel [(const_int 1)])) + (vec_select:SF (match_operand:V2SF 3 "gpc_reg_operand" "f") + (parallel [(const_int 0)]))) + (fma:SF + (vec_select:SF (match_dup 1) + (parallel [(const_int 1)])) + (vec_select:SF (match_dup 2) + (parallel [(const_int 1)])) + (vec_select:SF (match_dup 3) + (parallel [(const_int 1)])))))] + "TARGET_PAIRED_FLOAT" + "ps_madds1 %0,%1,%2,%3" + [(set_attr "type" "fp")]) + +(define_insn "*paired_madd" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (fma:V2SF + (match_operand:V2SF 1 "gpc_reg_operand" "f") + (match_operand:V2SF 2 "gpc_reg_operand" "f") + (match_operand:V2SF 3 "gpc_reg_operand" "f")))] + "TARGET_PAIRED_FLOAT" + "ps_madd %0,%1,%2,%3" + [(set_attr "type" "fp")]) + 
+(define_insn "*paired_msub" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (fma:V2SF + (match_operand:V2SF 1 "gpc_reg_operand" "f") + (match_operand:V2SF 2 "gpc_reg_operand" "f") + (neg:V2SF (match_operand:V2SF 3 "gpc_reg_operand" "f"))))] + "TARGET_PAIRED_FLOAT" + "ps_msub %0,%1,%2,%3" + [(set_attr "type" "fp")]) + +(define_insn "*paired_nmadd" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (neg:V2SF + (fma:V2SF + (match_operand:V2SF 1 "gpc_reg_operand" "f") + (match_operand:V2SF 2 "gpc_reg_operand" "f") + (match_operand:V2SF 3 "gpc_reg_operand" "f"))))] + "TARGET_PAIRED_FLOAT" + "ps_nmadd %0,%1,%2,%3" + [(set_attr "type" "fp")]) + +(define_insn "*paired_nmsub" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (neg:V2SF + (fma:V2SF + (match_operand:V2SF 1 "gpc_reg_operand" "f") + (match_operand:V2SF 2 "gpc_reg_operand" "f") + (neg:V2SF (match_operand:V2SF 3 "gpc_reg_operand" "f")))))] + "TARGET_PAIRED_FLOAT" + "ps_nmsub %0,%1,%2,%3" + [(set_attr "type" "dmul")]) + +(define_insn "selv2sf4" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (vec_concat:V2SF + (if_then_else:SF (ge (vec_select:SF (match_operand:V2SF 1 "gpc_reg_operand" "f") + (parallel [(const_int 0)])) + (match_operand:SF 4 "zero_fp_constant" "F")) + (vec_select:SF (match_operand:V2SF 2 "gpc_reg_operand" "f") + (parallel [(const_int 0)])) + (vec_select:SF (match_operand:V2SF 3 "gpc_reg_operand" "f") + (parallel [(const_int 0)]))) + (if_then_else:SF (ge (vec_select:SF (match_dup 1) + (parallel [(const_int 1)])) + (match_dup 4)) + (vec_select:SF (match_dup 2) + (parallel [(const_int 1)])) + (vec_select:SF (match_dup 3) + (parallel [(const_int 1)])))))] + + "TARGET_PAIRED_FLOAT" + "ps_sel %0,%1,%2,%3" + [(set_attr "type" "fp")]) + +(define_insn "*movv2sf_paired" + [(set (match_operand:V2SF 0 "nonimmediate_operand" "=Z,f,f,o,r,r,f") + (match_operand:V2SF 1 "input_operand" "f,Z,f,r,o,r,W"))] + "TARGET_PAIRED_FLOAT + && (register_operand (operands[0], V2SFmode) + || 
register_operand (operands[1], V2SFmode))" +{ + switch (which_alternative) + { + case 0: return "psq_stx %1,%y0,0,0"; + case 1: return "psq_lx %0,%y1,0,0"; + case 2: return "ps_mr %0,%1"; + case 3: return "#"; + case 4: return "#"; + case 5: return "#"; + case 6: return "#"; + default: gcc_unreachable (); + } +} + [(set_attr "type" "fpstore,fpload,fp,*,*,*,*")]) + +(define_insn "paired_stx" + [(set (match_operand:V2SF 0 "memory_operand" "=Z") + (match_operand:V2SF 1 "gpc_reg_operand" "f"))] + "TARGET_PAIRED_FLOAT" + "psq_stx %1,%y0,0,0" + [(set_attr "type" "fpstore")]) + +(define_insn "paired_lx" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (match_operand:V2SF 1 "memory_operand" "Z"))] + "TARGET_PAIRED_FLOAT" + "psq_lx %0,%y1,0,0" + [(set_attr "type" "fpload")]) + + +(define_split + [(set (match_operand:V2SF 0 "nonimmediate_operand" "") + (match_operand:V2SF 1 "input_operand" ""))] + "TARGET_PAIRED_FLOAT && reload_completed + && gpr_or_gpr_p (operands[0], operands[1])" + [(pc)] + { + rs6000_split_multireg_move (operands[0], operands[1]); DONE; + }) + +(define_insn "paired_cmpu0" + [(set (match_operand:CCFP 0 "cc_reg_operand" "=y") + (compare:CCFP (vec_select:SF + (match_operand:V2SF 1 "gpc_reg_operand" "f") + (parallel [(const_int 0)])) + (vec_select:SF + (match_operand:V2SF 2 "gpc_reg_operand" "f") + (parallel [(const_int 0)]))))] + "TARGET_PAIRED_FLOAT" + "ps_cmpu0 %0,%1,%2" + [(set_attr "type" "fpcompare")]) + +(define_insn "paired_cmpu1" + [(set (match_operand:CCFP 0 "cc_reg_operand" "=y") + (compare:CCFP (vec_select:SF + (match_operand:V2SF 1 "gpc_reg_operand" "f") + (parallel [(const_int 1)])) + (vec_select:SF + (match_operand:V2SF 2 "gpc_reg_operand" "f") + (parallel [(const_int 1)]))))] + "TARGET_PAIRED_FLOAT" + "ps_cmpu1 %0,%1,%2" + [(set_attr "type" "fpcompare")]) + +(define_insn "paired_merge00" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (vec_concat:V2SF + (vec_select:SF (match_operand:V2SF 1 "gpc_reg_operand" "f") + (parallel 
[(const_int 0)])) + (vec_select:SF (match_operand:V2SF 2 "gpc_reg_operand" "f") + (parallel [(const_int 0)]))))] + "TARGET_PAIRED_FLOAT" + "ps_merge00 %0, %1, %2" + [(set_attr "type" "fp")]) + +(define_insn "paired_merge01" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (vec_concat:V2SF + (vec_select:SF (match_operand:V2SF 1 "gpc_reg_operand" "f") + (parallel [(const_int 0)])) + (vec_select:SF (match_operand:V2SF 2 "gpc_reg_operand" "f") + (parallel [(const_int 1)]))))] + "TARGET_PAIRED_FLOAT" + "ps_merge01 %0, %1, %2" + [(set_attr "type" "fp")]) + +(define_insn "paired_merge10" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (vec_concat:V2SF + (vec_select:SF (match_operand:V2SF 1 "gpc_reg_operand" "f") + (parallel [(const_int 1)])) + (vec_select:SF (match_operand:V2SF 2 "gpc_reg_operand" "f") + (parallel [(const_int 0)]))))] + "TARGET_PAIRED_FLOAT" + "ps_merge10 %0, %1, %2" + [(set_attr "type" "fp")]) + +(define_insn "paired_merge11" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (vec_concat:V2SF + (vec_select:SF (match_operand:V2SF 1 "gpc_reg_operand" "f") + (parallel [(const_int 1)])) + (vec_select:SF (match_operand:V2SF 2 "gpc_reg_operand" "f") + (parallel [(const_int 1)]))))] + "TARGET_PAIRED_FLOAT" + "ps_merge11 %0, %1, %2" + [(set_attr "type" "fp")]) + +(define_insn "paired_sum0" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (vec_concat:V2SF (plus:SF (vec_select:SF + (match_operand:V2SF 1 "gpc_reg_operand" "f") + (parallel [(const_int 0)])) + (vec_select:SF + (match_operand:V2SF 2 "gpc_reg_operand" "f") + (parallel [(const_int 1)]))) + (vec_select:SF + (match_operand:V2SF 3 "gpc_reg_operand" "f") + (parallel [(const_int 1)]))))] + "TARGET_PAIRED_FLOAT" + "ps_sum0 %0,%1,%2,%3" + [(set_attr "type" "fp")]) + +(define_insn "paired_sum1" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (vec_concat:V2SF (vec_select:SF + (match_operand:V2SF 2 "gpc_reg_operand" "f") + (parallel [(const_int 1)])) + (plus:SF (vec_select:SF + 
(match_operand:V2SF 1 "gpc_reg_operand" "f") + (parallel [(const_int 0)])) + (vec_select:SF + (match_operand:V2SF 3 "gpc_reg_operand" "f") + (parallel [(const_int 1)])))))] + "TARGET_PAIRED_FLOAT" + "ps_sum1 %0,%1,%2,%3" + [(set_attr "type" "fp")]) + +(define_insn "paired_muls0" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (mult:V2SF (match_operand:V2SF 2 "gpc_reg_operand" "f") + (vec_duplicate:V2SF + (vec_select:SF (match_operand:V2SF 1 "gpc_reg_operand" "f") + (parallel [(const_int 0)])))))] + "TARGET_PAIRED_FLOAT" + "ps_muls0 %0, %1, %2" + [(set_attr "type" "fp")]) + + +(define_insn "paired_muls1" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (mult:V2SF (match_operand:V2SF 2 "gpc_reg_operand" "f") + (vec_duplicate:V2SF + (vec_select:SF (match_operand:V2SF 1 "gpc_reg_operand" "f") + (parallel [(const_int 1)])))))] + "TARGET_PAIRED_FLOAT" + "ps_muls1 %0, %1, %2" + [(set_attr "type" "fp")]) + +(define_expand "vec_initv2sf" + [(match_operand:V2SF 0 "gpc_reg_operand" "=f") + (match_operand 1 "" "")] + "TARGET_PAIRED_FLOAT" +{ + paired_expand_vector_init (operands[0], operands[1]); + DONE; +}) + +(define_insn "*vconcatsf" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (vec_concat:V2SF + (match_operand:SF 1 "gpc_reg_operand" "f") + (match_operand:SF 2 "gpc_reg_operand" "f")))] + "TARGET_PAIRED_FLOAT" + "ps_merge00 %0, %1, %2" + [(set_attr "type" "fp")]) + +(define_expand "sminv2sf3" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (smin:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f") + (match_operand:V2SF 2 "gpc_reg_operand" "f")))] + "TARGET_PAIRED_FLOAT" +{ + rtx tmp = gen_reg_rtx (V2SFmode); + + emit_insn (gen_subv2sf3 (tmp, operands[1], operands[2])); + emit_insn (gen_selv2sf4 (operands[0], tmp, operands[2], operands[1], CONST0_RTX (SFmode))); + DONE; +}) + +(define_expand "smaxv2sf3" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (smax:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f") + (match_operand:V2SF 2 
"gpc_reg_operand" "f")))] + "TARGET_PAIRED_FLOAT" +{ + rtx tmp = gen_reg_rtx (V2SFmode); + + emit_insn (gen_subv2sf3 (tmp, operands[1], operands[2])); + emit_insn (gen_selv2sf4 (operands[0], tmp, operands[1], operands[2], CONST0_RTX (SFmode))); + DONE; +}) + +(define_expand "reduc_smax_v2sf" + [(match_operand:V2SF 0 "gpc_reg_operand" "=f") + (match_operand:V2SF 1 "gpc_reg_operand" "f")] + "TARGET_PAIRED_FLOAT" +{ + rtx tmp_swap = gen_reg_rtx (V2SFmode); + rtx tmp = gen_reg_rtx (V2SFmode); + + emit_insn (gen_paired_merge10 (tmp_swap, operands[1], operands[1])); + emit_insn (gen_subv2sf3 (tmp, operands[1], tmp_swap)); + emit_insn (gen_selv2sf4 (operands[0], tmp, operands[1], tmp_swap, CONST0_RTX (SFmode))); + + DONE; +}) + +(define_expand "reduc_smin_v2sf" + [(match_operand:V2SF 0 "gpc_reg_operand" "=f") + (match_operand:V2SF 1 "gpc_reg_operand" "f")] + "TARGET_PAIRED_FLOAT" +{ + rtx tmp_swap = gen_reg_rtx (V2SFmode); + rtx tmp = gen_reg_rtx (V2SFmode); + + emit_insn (gen_paired_merge10 (tmp_swap, operands[1], operands[1])); + emit_insn (gen_subv2sf3 (tmp, operands[1], tmp_swap)); + emit_insn (gen_selv2sf4 (operands[0], tmp, tmp_swap, operands[1], CONST0_RTX (SFmode))); + + DONE; +}) + +(define_expand "vec_interleave_highv2sf" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (unspec:V2SF [(match_operand:V2SF 1 "gpc_reg_operand" "f") + (match_operand:V2SF 2 "gpc_reg_operand" "f")] + UNSPEC_INTERHI_V2SF))] + "TARGET_PAIRED_FLOAT" + " +{ + emit_insn (gen_paired_merge00 (operands[0], operands[1], operands[2])); + DONE; +}") + +(define_expand "vec_interleave_lowv2sf" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (unspec:V2SF [(match_operand:V2SF 1 "gpc_reg_operand" "f") + (match_operand:V2SF 2 "gpc_reg_operand" "f")] + UNSPEC_INTERLO_V2SF))] + "TARGET_PAIRED_FLOAT" + " +{ + emit_insn (gen_paired_merge11 (operands[0], operands[1], operands[2])); + DONE; +}") + +(define_expand "vec_extract_evenv2sf" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + 
(unspec:V2SF [(match_operand:V2SF 1 "gpc_reg_operand" "f") + (match_operand:V2SF 2 "gpc_reg_operand" "f")] + UNSPEC_EXTEVEN_V2SF))] + "TARGET_PAIRED_FLOAT" + " +{ + emit_insn (gen_paired_merge00 (operands[0], operands[1], operands[2])); + DONE; +}") + +(define_expand "vec_extract_oddv2sf" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (unspec:V2SF [(match_operand:V2SF 1 "gpc_reg_operand" "f") + (match_operand:V2SF 2 "gpc_reg_operand" "f")] + UNSPEC_EXTODD_V2SF))] + "TARGET_PAIRED_FLOAT" + " +{ + emit_insn (gen_paired_merge11 (operands[0], operands[1], operands[2])); + DONE; +}") + + +(define_expand "reduc_splus_v2sf" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (match_operand:V2SF 1 "gpc_reg_operand" "f"))] + "TARGET_PAIRED_FLOAT" + " +{ + emit_insn (gen_paired_sum1 (operands[0], operands[1], operands[1], operands[1])); + DONE; +}") + +(define_expand "movmisalignv2sf" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (match_operand:V2SF 1 "gpc_reg_operand" "f"))] + "TARGET_PAIRED_FLOAT" +{ + paired_expand_vector_move (operands); + DONE; +}) + +(define_expand "vcondv2sf" + [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f") + (if_then_else:V2SF + (match_operator 3 "gpc_reg_operand" + [(match_operand:V2SF 4 "gpc_reg_operand" "f") + (match_operand:V2SF 5 "gpc_reg_operand" "f")]) + (match_operand:V2SF 1 "gpc_reg_operand" "f") + (match_operand:V2SF 2 "gpc_reg_operand" "f")))] + "TARGET_PAIRED_FLOAT && flag_unsafe_math_optimizations" + " +{ + if (paired_emit_vector_cond_expr (operands[0], operands[1], operands[2], + operands[3], operands[4], operands[5])) + DONE; + else + FAIL; +}") + diff --git a/gcc/config/rs6000/power4.md b/gcc/config/rs6000/power4.md new file mode 100644 index 000000000..60dbffd58 --- /dev/null +++ b/gcc/config/rs6000/power4.md @@ -0,0 +1,410 @@ +;; Scheduling description for IBM Power4 and PowerPC 970 processors. +;; Copyright (C) 2003, 2004, 2007, 2009 Free Software Foundation, Inc. +;; +;; This file is part of GCC. 
+;; +;; GCC is free software; you can redistribute it and/or modify it +;; under the terms of the GNU General Public License as published +;; by the Free Software Foundation; either version 3, or (at your +;; option) any later version. +;; +;; GCC is distributed in the hope that it will be useful, but WITHOUT +;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +;; License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . + +;; Sources: IBM Red Book and White Paper on POWER4 + +;; The POWER4 has 2 iu, 2 fpu, 2 lsu per engine (2 engines per chip). +;; Instructions that update more than one register get broken into two +;; (split) or more internal ops. The chip can issue up to 5 +;; internal ops per cycle. + +(define_automaton "power4iu,power4fpu,power4vec,power4misc") + +(define_cpu_unit "iu1_power4,iu2_power4" "power4iu") +(define_cpu_unit "lsu1_power4,lsu2_power4" "power4misc") +(define_cpu_unit "fpu1_power4,fpu2_power4" "power4fpu") +(define_cpu_unit "bpu_power4,cru_power4" "power4misc") +(define_cpu_unit "vec_power4,vecperm_power4" "power4vec") +(define_cpu_unit "du1_power4,du2_power4,du3_power4,du4_power4,du5_power4" + "power4misc") + +(define_reservation "lsq_power4" + "(du1_power4,lsu1_power4)\ + |(du2_power4,lsu2_power4)\ + |(du3_power4,lsu2_power4)\ + |(du4_power4,lsu1_power4)") + +(define_reservation "lsuq_power4" + "((du1_power4+du2_power4,lsu1_power4)\ + |(du2_power4+du3_power4,lsu2_power4)\ + |(du3_power4+du4_power4,lsu2_power4))\ + +(nothing,iu2_power4|nothing,iu1_power4)") + +(define_reservation "iq_power4" + "(du1_power4|du2_power4|du3_power4|du4_power4),\ + (iu1_power4|iu2_power4)") + +(define_reservation "fpq_power4" + "(du1_power4|du2_power4|du3_power4|du4_power4),\ + (fpu1_power4|fpu2_power4)") + +(define_reservation "vq_power4" + "(du1_power4,vec_power4)\ + 
|(du2_power4,vec_power4)\ + |(du3_power4,vec_power4)\ + |(du4_power4,vec_power4)") + +(define_reservation "vpq_power4" + "(du1_power4,vecperm_power4)\ + |(du2_power4,vecperm_power4)\ + |(du3_power4,vecperm_power4)\ + |(du4_power4,vecperm_power4)") + + +; Dispatch slots are allocated in order conforming to program order. +(absence_set "du1_power4" "du2_power4,du3_power4,du4_power4,du5_power4") +(absence_set "du2_power4" "du3_power4,du4_power4,du5_power4") +(absence_set "du3_power4" "du4_power4,du5_power4") +(absence_set "du4_power4" "du5_power4") + + +; Load/store +(define_insn_reservation "power4-load" 4 ; 3 + (and (eq_attr "type" "load") + (eq_attr "cpu" "power4")) + "lsq_power4") + +(define_insn_reservation "power4-load-ext" 5 + (and (eq_attr "type" "load_ext") + (eq_attr "cpu" "power4")) + "(du1_power4+du2_power4,lsu1_power4\ + |du2_power4+du3_power4,lsu2_power4\ + |du3_power4+du4_power4,lsu2_power4),\ + nothing,nothing,\ + (iu2_power4|iu1_power4)") + +(define_insn_reservation "power4-load-ext-update" 5 + (and (eq_attr "type" "load_ext_u") + (eq_attr "cpu" "power4")) + "du1_power4+du2_power4+du3_power4+du4_power4,\ + lsu1_power4+iu2_power4,nothing,nothing,iu2_power4") + +(define_insn_reservation "power4-load-ext-update-indexed" 5 + (and (eq_attr "type" "load_ext_ux") + (eq_attr "cpu" "power4")) + "du1_power4+du2_power4+du3_power4+du4_power4,\ + iu1_power4,lsu2_power4+iu1_power4,nothing,nothing,iu2_power4") + +(define_insn_reservation "power4-load-update-indexed" 3 + (and (eq_attr "type" "load_ux") + (eq_attr "cpu" "power4")) + "du1_power4+du2_power4+du3_power4+du4_power4,\ + iu1_power4,lsu2_power4+iu2_power4") + +(define_insn_reservation "power4-load-update" 4 ; 3 + (and (eq_attr "type" "load_u") + (eq_attr "cpu" "power4")) + "lsuq_power4") + +(define_insn_reservation "power4-fpload" 6 ; 5 + (and (eq_attr "type" "fpload") + (eq_attr "cpu" "power4")) + "lsq_power4") + +(define_insn_reservation "power4-fpload-update" 6 ; 5 + (and (eq_attr "type" 
"fpload_u,fpload_ux") + (eq_attr "cpu" "power4")) + "lsuq_power4") + +(define_insn_reservation "power4-vecload" 6 ; 5 + (and (eq_attr "type" "vecload") + (eq_attr "cpu" "power4")) + "lsq_power4") + +(define_insn_reservation "power4-store" 12 + (and (eq_attr "type" "store") + (eq_attr "cpu" "power4")) + "((du1_power4,lsu1_power4)\ + |(du2_power4,lsu2_power4)\ + |(du3_power4,lsu2_power4)\ + |(du4_power4,lsu1_power4)),\ + (iu1_power4|iu2_power4)") + +(define_insn_reservation "power4-store-update" 12 + (and (eq_attr "type" "store_u") + (eq_attr "cpu" "power4")) + "((du1_power4+du2_power4,lsu1_power4)\ + |(du2_power4+du3_power4,lsu2_power4)\ + |(du3_power4+du4_power4,lsu2_power4)\ + |(du3_power4+du4_power4,lsu2_power4))+\ + ((nothing,iu2_power4,iu1_power4)\ + |(nothing,iu2_power4,iu2_power4)\ + |(nothing,iu1_power4,iu2_power4)\ + |(nothing,iu1_power4,iu2_power4))") + +(define_insn_reservation "power4-store-update-indexed" 12 + (and (eq_attr "type" "store_ux") + (eq_attr "cpu" "power4")) + "du1_power4+du2_power4+du3_power4+du4_power4,\ + iu1_power4,lsu2_power4+iu2_power4,iu2_power4") + +(define_insn_reservation "power4-fpstore" 12 + (and (eq_attr "type" "fpstore") + (eq_attr "cpu" "power4")) + "((du1_power4,lsu1_power4)\ + |(du2_power4,lsu2_power4)\ + |(du3_power4,lsu2_power4)\ + |(du4_power4,lsu1_power4)),\ + (fpu1_power4|fpu2_power4)") + +(define_insn_reservation "power4-fpstore-update" 12 + (and (eq_attr "type" "fpstore_u,fpstore_ux") + (eq_attr "cpu" "power4")) + "((du1_power4+du2_power4,lsu1_power4)\ + |(du2_power4+du3_power4,lsu2_power4)\ + |(du3_power4+du4_power4,lsu2_power4))\ + +(nothing,(iu1_power4|iu2_power4),(fpu1_power4|fpu2_power4))") + +(define_insn_reservation "power4-vecstore" 12 + (and (eq_attr "type" "vecstore") + (eq_attr "cpu" "power4")) + "(du1_power4,lsu1_power4,vec_power4)\ + |(du2_power4,lsu2_power4,vec_power4)\ + |(du3_power4,lsu2_power4,vec_power4)\ + |(du4_power4,lsu1_power4,vec_power4)") + +(define_insn_reservation "power4-llsc" 11 + (and 
(eq_attr "type" "load_l,store_c,sync") + (eq_attr "cpu" "power4")) + "du1_power4+du2_power4+du3_power4+du4_power4,lsu1_power4") + + +; Integer latency is 2 cycles +(define_insn_reservation "power4-integer" 2 + (and (eq_attr "type" "integer,insert_dword,shift,trap,\ + var_shift_rotate,cntlz,exts,isel") + (eq_attr "cpu" "power4")) + "iq_power4") + +(define_insn_reservation "power4-two" 2 + (and (eq_attr "type" "two") + (eq_attr "cpu" "power4")) + "((du1_power4+du2_power4)\ + |(du2_power4+du3_power4)\ + |(du3_power4+du4_power4)\ + |(du4_power4+du1_power4)),\ + ((iu1_power4,nothing,iu2_power4)\ + |(iu2_power4,nothing,iu2_power4)\ + |(iu2_power4,nothing,iu1_power4)\ + |(iu1_power4,nothing,iu1_power4))") + +(define_insn_reservation "power4-three" 2 + (and (eq_attr "type" "three") + (eq_attr "cpu" "power4")) + "(du1_power4+du2_power4+du3_power4|du2_power4+du3_power4+du4_power4\ + |du3_power4+du4_power4+du1_power4|du4_power4+du1_power4+du2_power4),\ + ((iu1_power4,nothing,iu2_power4,nothing,iu2_power4)\ + |(iu2_power4,nothing,iu2_power4,nothing,iu1_power4)\ + |(iu2_power4,nothing,iu1_power4,nothing,iu1_power4)\ + |(iu1_power4,nothing,iu2_power4,nothing,iu2_power4))") + +(define_insn_reservation "power4-insert" 4 + (and (eq_attr "type" "insert_word") + (eq_attr "cpu" "power4")) + "(du1_power4+du2_power4|du2_power4+du3_power4|du3_power4+du4_power4),\ + ((iu1_power4,nothing,iu2_power4)\ + |(iu2_power4,nothing,iu2_power4)\ + |(iu2_power4,nothing,iu1_power4))") + +(define_insn_reservation "power4-cmp" 3 + (and (eq_attr "type" "cmp,fast_compare") + (eq_attr "cpu" "power4")) + "iq_power4") + +(define_insn_reservation "power4-compare" 2 + (and (eq_attr "type" "compare,delayed_compare,var_delayed_compare") + (eq_attr "cpu" "power4")) + "(du1_power4+du2_power4|du2_power4+du3_power4|du3_power4+du4_power4),\ + ((iu1_power4,iu2_power4)\ + |(iu2_power4,iu2_power4)\ + |(iu2_power4,iu1_power4))") + +(define_bypass 4 "power4-compare" 
"power4-branch,power4-crlogical,power4-delayedcr,power4-mfcr,power4-mfcrf") + +(define_insn_reservation "power4-lmul-cmp" 7 + (and (eq_attr "type" "lmul_compare") + (eq_attr "cpu" "power4")) + "(du1_power4+du2_power4|du2_power4+du3_power4|du3_power4+du4_power4),\ + ((iu1_power4*6,iu2_power4)\ + |(iu2_power4*6,iu2_power4)\ + |(iu2_power4*6,iu1_power4))") + +(define_bypass 10 "power4-lmul-cmp" "power4-branch,power4-crlogical,power4-delayedcr,power4-mfcr,power4-mfcrf") + +(define_insn_reservation "power4-imul-cmp" 5 + (and (eq_attr "type" "imul_compare") + (eq_attr "cpu" "power4")) + "(du1_power4+du2_power4|du2_power4+du3_power4|du3_power4+du4_power4),\ + ((iu1_power4*4,iu2_power4)\ + |(iu2_power4*4,iu2_power4)\ + |(iu2_power4*4,iu1_power4))") + +(define_bypass 8 "power4-imul-cmp" "power4-branch,power4-crlogical,power4-delayedcr,power4-mfcr,power4-mfcrf") + +(define_insn_reservation "power4-lmul" 7 + (and (eq_attr "type" "lmul") + (eq_attr "cpu" "power4")) + "(du1_power4|du2_power4|du3_power4|du4_power4),\ + (iu1_power4*6|iu2_power4*6)") + +(define_insn_reservation "power4-imul" 5 + (and (eq_attr "type" "imul") + (eq_attr "cpu" "power4")) + "(du1_power4|du2_power4|du3_power4|du4_power4),\ + (iu1_power4*4|iu2_power4*4)") + +(define_insn_reservation "power4-imul3" 4 + (and (eq_attr "type" "imul2,imul3") + (eq_attr "cpu" "power4")) + "(du1_power4|du2_power4|du3_power4|du4_power4),\ + (iu1_power4*3|iu2_power4*3)") + + +; SPR move only executes in first IU. +; Integer division only executes in second IU. +(define_insn_reservation "power4-idiv" 36 + (and (eq_attr "type" "idiv") + (eq_attr "cpu" "power4")) + "du1_power4+du2_power4,iu2_power4*35") + +(define_insn_reservation "power4-ldiv" 68 + (and (eq_attr "type" "ldiv") + (eq_attr "cpu" "power4")) + "du1_power4+du2_power4,iu2_power4*67") + + +(define_insn_reservation "power4-mtjmpr" 3 + (and (eq_attr "type" "mtjmpr,mfjmpr") + (eq_attr "cpu" "power4")) + "du1_power4,bpu_power4") + + +; Branches take dispatch Slot 4. 
The presence_sets prevent other insn from +; grabbing previous dispatch slots once this is assigned. +(define_insn_reservation "power4-branch" 2 + (and (eq_attr "type" "jmpreg,branch") + (eq_attr "cpu" "power4")) + "(du5_power4\ + |du4_power4+du5_power4\ + |du3_power4+du4_power4+du5_power4\ + |du2_power4+du3_power4+du4_power4+du5_power4\ + |du1_power4+du2_power4+du3_power4+du4_power4+du5_power4),bpu_power4") + + +; Condition Register logical ops are split if non-destructive (RT != RB) +(define_insn_reservation "power4-crlogical" 2 + (and (eq_attr "type" "cr_logical") + (eq_attr "cpu" "power4")) + "du1_power4,cru_power4") + +(define_insn_reservation "power4-delayedcr" 4 + (and (eq_attr "type" "delayed_cr") + (eq_attr "cpu" "power4")) + "du1_power4+du2_power4,cru_power4,cru_power4") + +; 4 mfcrf (each 3 cyc, 1/cyc) + 3 fxu +(define_insn_reservation "power4-mfcr" 6 + (and (eq_attr "type" "mfcr") + (eq_attr "cpu" "power4")) + "du1_power4+du2_power4+du3_power4+du4_power4,\ + du1_power4+du2_power4+du3_power4+du4_power4+cru_power4,\ + cru_power4,cru_power4,cru_power4") + +; mfcrf (1 field) +(define_insn_reservation "power4-mfcrf" 3 + (and (eq_attr "type" "mfcrf") + (eq_attr "cpu" "power4")) + "du1_power4,cru_power4") + +; mtcrf (1 field) +(define_insn_reservation "power4-mtcr" 4 + (and (eq_attr "type" "mtcr") + (eq_attr "cpu" "power4")) + "du1_power4,iu1_power4") + +; Basic FP latency is 6 cycles +(define_insn_reservation "power4-fp" 6 + (and (eq_attr "type" "fp,dmul") + (eq_attr "cpu" "power4")) + "fpq_power4") + +(define_insn_reservation "power4-fpcompare" 5 + (and (eq_attr "type" "fpcompare") + (eq_attr "cpu" "power4")) + "fpq_power4") + +(define_insn_reservation "power4-sdiv" 33 + (and (eq_attr "type" "sdiv,ddiv") + (eq_attr "cpu" "power4")) + "(du1_power4|du2_power4|du3_power4|du4_power4),\ + (fpu1_power4*28|fpu2_power4*28)") + +(define_insn_reservation "power4-sqrt" 40 + (and (eq_attr "type" "ssqrt,dsqrt") + (eq_attr "cpu" "power4")) + 
"(du1_power4|du2_power4|du3_power4|du4_power4),\ + (fpu1_power4*35|fpu2_power4*35)") + +(define_insn_reservation "power4-isync" 2 + (and (eq_attr "type" "isync") + (eq_attr "cpu" "power4")) + "du1_power4+du2_power4+du3_power4+du4_power4,lsu1_power4") + + +; VMX +(define_insn_reservation "power4-vecsimple" 2 + (and (eq_attr "type" "vecsimple") + (eq_attr "cpu" "power4")) + "vq_power4") + +(define_insn_reservation "power4-veccomplex" 5 + (and (eq_attr "type" "veccomplex") + (eq_attr "cpu" "power4")) + "vq_power4") + +; vecfp compare +(define_insn_reservation "power4-veccmp" 8 + (and (eq_attr "type" "veccmp") + (eq_attr "cpu" "power4")) + "vq_power4") + +(define_insn_reservation "power4-vecfloat" 8 + (and (eq_attr "type" "vecfloat") + (eq_attr "cpu" "power4")) + "vq_power4") + +(define_insn_reservation "power4-vecperm" 2 + (and (eq_attr "type" "vecperm") + (eq_attr "cpu" "power4")) + "vpq_power4") + +(define_bypass 4 "power4-vecload" "power4-vecperm") + +(define_bypass 3 "power4-vecsimple" "power4-vecperm") +(define_bypass 6 "power4-veccomplex" "power4-vecperm") +(define_bypass 3 "power4-vecperm" + "power4-vecsimple,power4-veccomplex,power4-vecfloat") +(define_bypass 9 "power4-vecfloat" "power4-vecperm") + +(define_bypass 5 "power4-vecsimple,power4-veccomplex" + "power4-branch,power4-crlogical,power4-delayedcr,power4-mfcr,power4-mfcrf") + +(define_bypass 4 "power4-vecsimple,power4-vecperm" "power4-vecstore") +(define_bypass 7 "power4-veccomplex" "power4-vecstore") +(define_bypass 10 "power4-vecfloat" "power4-vecstore") diff --git a/gcc/config/rs6000/power5.md b/gcc/config/rs6000/power5.md new file mode 100644 index 000000000..b6db09312 --- /dev/null +++ b/gcc/config/rs6000/power5.md @@ -0,0 +1,308 @@ +;; Scheduling description for IBM POWER5 processor. +;; Copyright (C) 2003, 2004, 2007, 2009 Free Software Foundation, Inc. +;; +;; This file is part of GCC. 
+;; +;; GCC is free software; you can redistribute it and/or modify it +;; under the terms of the GNU General Public License as published +;; by the Free Software Foundation; either version 3, or (at your +;; option) any later version. +;; +;; GCC is distributed in the hope that it will be useful, but WITHOUT +;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +;; License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . + +;; Sources: IBM Red Book and White Paper on POWER5 + +;; The POWER5 has 2 iu, 2 fpu, 2 lsu per engine (2 engines per chip). +;; Instructions that update more than one register get broken into two +;; (split) or more internal ops. The chip can issue up to 5 +;; internal ops per cycle. + +(define_automaton "power5iu,power5fpu,power5misc") + +(define_cpu_unit "iu1_power5,iu2_power5" "power5iu") +(define_cpu_unit "lsu1_power5,lsu2_power5" "power5misc") +(define_cpu_unit "fpu1_power5,fpu2_power5" "power5fpu") +(define_cpu_unit "bpu_power5,cru_power5" "power5misc") +(define_cpu_unit "du1_power5,du2_power5,du3_power5,du4_power5,du5_power5" + "power5misc") + +(define_reservation "lsq_power5" + "(du1_power5,lsu1_power5)\ + |(du2_power5,lsu2_power5)\ + |(du3_power5,lsu2_power5)\ + |(du4_power5,lsu1_power5)") + +(define_reservation "iq_power5" + "(du1_power5|du2_power5|du3_power5|du4_power5),\ + (iu1_power5|iu2_power5)") + +(define_reservation "fpq_power5" + "(du1_power5|du2_power5|du3_power5|du4_power5),\ + (fpu1_power5|fpu2_power5)") + +; Dispatch slots are allocated in order conforming to program order. 
+(absence_set "du1_power5" "du2_power5,du3_power5,du4_power5,du5_power5") +(absence_set "du2_power5" "du3_power5,du4_power5,du5_power5") +(absence_set "du3_power5" "du4_power5,du5_power5") +(absence_set "du4_power5" "du5_power5") + + +; Load/store +(define_insn_reservation "power5-load" 4 ; 3 + (and (eq_attr "type" "load") + (eq_attr "cpu" "power5")) + "lsq_power5") + +(define_insn_reservation "power5-load-ext" 5 + (and (eq_attr "type" "load_ext") + (eq_attr "cpu" "power5")) + "du1_power5+du2_power5,lsu1_power5,nothing,nothing,iu2_power5") + +(define_insn_reservation "power5-load-ext-update" 5 + (and (eq_attr "type" "load_ext_u") + (eq_attr "cpu" "power5")) + "du1_power5+du2_power5+du3_power5+du4_power5,\ + lsu1_power5+iu2_power5,nothing,nothing,iu2_power5") + +(define_insn_reservation "power5-load-ext-update-indexed" 5 + (and (eq_attr "type" "load_ext_ux") + (eq_attr "cpu" "power5")) + "du1_power5+du2_power5+du3_power5+du4_power5,\ + iu1_power5,lsu2_power5+iu1_power5,nothing,nothing,iu2_power5") + +(define_insn_reservation "power5-load-update-indexed" 3 + (and (eq_attr "type" "load_ux") + (eq_attr "cpu" "power5")) + "du1_power5+du2_power5+du3_power5+du4_power5,\ + iu1_power5,lsu2_power5+iu2_power5") + +(define_insn_reservation "power5-load-update" 4 ; 3 + (and (eq_attr "type" "load_u") + (eq_attr "cpu" "power5")) + "du1_power5+du2_power5,lsu1_power5+iu2_power5") + +(define_insn_reservation "power5-fpload" 6 ; 5 + (and (eq_attr "type" "fpload") + (eq_attr "cpu" "power5")) + "lsq_power5") + +(define_insn_reservation "power5-fpload-update" 6 ; 5 + (and (eq_attr "type" "fpload_u,fpload_ux") + (eq_attr "cpu" "power5")) + "du1_power5+du2_power5,lsu1_power5+iu2_power5") + +(define_insn_reservation "power5-store" 12 + (and (eq_attr "type" "store") + (eq_attr "cpu" "power5")) + "((du1_power5,lsu1_power5)\ + |(du2_power5,lsu2_power5)\ + |(du3_power5,lsu2_power5)\ + |(du4_power5,lsu1_power5)),\ + (iu1_power5|iu2_power5)") + +(define_insn_reservation "power5-store-update" 12 
+ (and (eq_attr "type" "store_u") + (eq_attr "cpu" "power5")) + "du1_power5+du2_power5,lsu1_power5+iu2_power5,iu1_power5") + +(define_insn_reservation "power5-store-update-indexed" 12 + (and (eq_attr "type" "store_ux") + (eq_attr "cpu" "power5")) + "du1_power5+du2_power5+du3_power5+du4_power5,\ + iu1_power5,lsu2_power5+iu2_power5,iu2_power5") + +(define_insn_reservation "power5-fpstore" 12 + (and (eq_attr "type" "fpstore") + (eq_attr "cpu" "power5")) + "((du1_power5,lsu1_power5)\ + |(du2_power5,lsu2_power5)\ + |(du3_power5,lsu2_power5)\ + |(du4_power5,lsu1_power5)),\ + (fpu1_power5|fpu2_power5)") + +(define_insn_reservation "power5-fpstore-update" 12 + (and (eq_attr "type" "fpstore_u,fpstore_ux") + (eq_attr "cpu" "power5")) + "du1_power5+du2_power5,lsu1_power5+iu2_power5,fpu1_power5") + +(define_insn_reservation "power5-llsc" 11 + (and (eq_attr "type" "load_l,store_c,sync") + (eq_attr "cpu" "power5")) + "du1_power5+du2_power5+du3_power5+du4_power5,\ + lsu1_power5") + + +; Integer latency is 2 cycles +(define_insn_reservation "power5-integer" 2 + (and (eq_attr "type" "integer,insert_dword,shift,trap,\ + var_shift_rotate,cntlz,exts,isel") + (eq_attr "cpu" "power5")) + "iq_power5") + +(define_insn_reservation "power5-two" 2 + (and (eq_attr "type" "two") + (eq_attr "cpu" "power5")) + "((du1_power5+du2_power5)\ + |(du2_power5+du3_power5)\ + |(du3_power5+du4_power5)\ + |(du4_power5+du1_power5)),\ + ((iu1_power5,nothing,iu2_power5)\ + |(iu2_power5,nothing,iu2_power5)\ + |(iu2_power5,nothing,iu1_power5)\ + |(iu1_power5,nothing,iu1_power5))") + +(define_insn_reservation "power5-three" 2 + (and (eq_attr "type" "three") + (eq_attr "cpu" "power5")) + "(du1_power5+du2_power5+du3_power5|du2_power5+du3_power5+du4_power5\ + |du3_power5+du4_power5+du1_power5|du4_power5+du1_power5+du2_power5),\ + ((iu1_power5,nothing,iu2_power5,nothing,iu2_power5)\ + |(iu2_power5,nothing,iu2_power5,nothing,iu1_power5)\ + |(iu2_power5,nothing,iu1_power5,nothing,iu1_power5)\ + 
|(iu1_power5,nothing,iu2_power5,nothing,iu2_power5))") + +(define_insn_reservation "power5-insert" 4 + (and (eq_attr "type" "insert_word") + (eq_attr "cpu" "power5")) + "du1_power5+du2_power5,iu1_power5,nothing,iu2_power5") + +(define_insn_reservation "power5-cmp" 3 + (and (eq_attr "type" "cmp,fast_compare") + (eq_attr "cpu" "power5")) + "iq_power5") + +(define_insn_reservation "power5-compare" 2 + (and (eq_attr "type" "compare,delayed_compare,var_delayed_compare") + (eq_attr "cpu" "power5")) + "du1_power5+du2_power5,iu1_power5,iu2_power5") + +(define_bypass 4 "power5-compare" "power5-branch,power5-crlogical,power5-delayedcr,power5-mfcr,power5-mfcrf") + +(define_insn_reservation "power5-lmul-cmp" 7 + (and (eq_attr "type" "lmul_compare") + (eq_attr "cpu" "power5")) + "du1_power5+du2_power5,iu1_power5*6,iu2_power5") + +(define_bypass 10 "power5-lmul-cmp" "power5-branch,power5-crlogical,power5-delayedcr,power5-mfcr,power5-mfcrf") + +(define_insn_reservation "power5-imul-cmp" 5 + (and (eq_attr "type" "imul_compare") + (eq_attr "cpu" "power5")) + "du1_power5+du2_power5,iu1_power5*4,iu2_power5") + +(define_bypass 8 "power5-imul-cmp" "power5-branch,power5-crlogical,power5-delayedcr,power5-mfcr,power5-mfcrf") + +(define_insn_reservation "power5-lmul" 7 + (and (eq_attr "type" "lmul") + (eq_attr "cpu" "power5")) + "(du1_power5|du2_power5|du3_power5|du4_power5),(iu1_power5*6|iu2_power5*6)") + +(define_insn_reservation "power5-imul" 5 + (and (eq_attr "type" "imul") + (eq_attr "cpu" "power5")) + "(du1_power5|du2_power5|du3_power5|du4_power5),(iu1_power5*4|iu2_power5*4)") + +(define_insn_reservation "power5-imul3" 4 + (and (eq_attr "type" "imul2,imul3") + (eq_attr "cpu" "power5")) + "(du1_power5|du2_power5|du3_power5|du4_power5),(iu1_power5*3|iu2_power5*3)") + + +; SPR move only executes in first IU. +; Integer division only executes in second IU. 
+(define_insn_reservation "power5-idiv" 36 + (and (eq_attr "type" "idiv") + (eq_attr "cpu" "power5")) + "du1_power5+du2_power5,iu2_power5*35") + +(define_insn_reservation "power5-ldiv" 68 + (and (eq_attr "type" "ldiv") + (eq_attr "cpu" "power5")) + "du1_power5+du2_power5,iu2_power5*67") + + +(define_insn_reservation "power5-mtjmpr" 3 + (and (eq_attr "type" "mtjmpr,mfjmpr") + (eq_attr "cpu" "power5")) + "du1_power5,bpu_power5") + + +; Branches take dispatch Slot 4. The presence_sets prevent other insn from +; grabbing previous dispatch slots once this is assigned. +(define_insn_reservation "power5-branch" 2 + (and (eq_attr "type" "jmpreg,branch") + (eq_attr "cpu" "power5")) + "(du5_power5\ + |du4_power5+du5_power5\ + |du3_power5+du4_power5+du5_power5\ + |du2_power5+du3_power5+du4_power5+du5_power5\ + |du1_power5+du2_power5+du3_power5+du4_power5+du5_power5),bpu_power5") + + +; Condition Register logical ops are split if non-destructive (RT != RB) +(define_insn_reservation "power5-crlogical" 2 + (and (eq_attr "type" "cr_logical") + (eq_attr "cpu" "power5")) + "du1_power5,cru_power5") + +(define_insn_reservation "power5-delayedcr" 4 + (and (eq_attr "type" "delayed_cr") + (eq_attr "cpu" "power5")) + "du1_power5+du2_power5,cru_power5,cru_power5") + +; 4 mfcrf (each 3 cyc, 1/cyc) + 3 fxu +(define_insn_reservation "power5-mfcr" 6 + (and (eq_attr "type" "mfcr") + (eq_attr "cpu" "power5")) + "du1_power5+du2_power5+du3_power5+du4_power5,\ + du1_power5+du2_power5+du3_power5+du4_power5+cru_power5,\ + cru_power5,cru_power5,cru_power5") + +; mfcrf (1 field) +(define_insn_reservation "power5-mfcrf" 3 + (and (eq_attr "type" "mfcrf") + (eq_attr "cpu" "power5")) + "du1_power5,cru_power5") + +; mtcrf (1 field) +(define_insn_reservation "power5-mtcr" 4 + (and (eq_attr "type" "mtcr") + (eq_attr "cpu" "power5")) + "du1_power5,iu1_power5") + +; Basic FP latency is 6 cycles +(define_insn_reservation "power5-fp" 6 + (and (eq_attr "type" "fp,dmul") + (eq_attr "cpu" "power5")) + "fpq_power5") 
+ +(define_insn_reservation "power5-fpcompare" 5 + (and (eq_attr "type" "fpcompare") + (eq_attr "cpu" "power5")) + "fpq_power5") + +(define_insn_reservation "power5-sdiv" 33 + (and (eq_attr "type" "sdiv,ddiv") + (eq_attr "cpu" "power5")) + "(du1_power5|du2_power5|du3_power5|du4_power5),\ + (fpu1_power5*28|fpu2_power5*28)") + +(define_insn_reservation "power5-sqrt" 40 + (and (eq_attr "type" "ssqrt,dsqrt") + (eq_attr "cpu" "power5")) + "(du1_power5|du2_power5|du3_power5|du4_power5),\ + (fpu1_power5*35|fpu2_power5*35)") + +(define_insn_reservation "power5-isync" 2 + (and (eq_attr "type" "isync") + (eq_attr "cpu" "power5")) + "du1_power5+du2_power5+du3_power5+du4_power5,\ + lsu1_power5") + diff --git a/gcc/config/rs6000/power6.md b/gcc/config/rs6000/power6.md new file mode 100644 index 000000000..8d54c8129 --- /dev/null +++ b/gcc/config/rs6000/power6.md @@ -0,0 +1,573 @@ +;; Scheduling description for IBM POWER6 processor. +;; Copyright (C) 2006, 2007, 2009 Free Software Foundation, Inc. +;; Contributed by Peter Steinmetz (steinmtz@us.ibm.com) +;; +;; This file is part of GCC. +;; +;; GCC is free software; you can redistribute it and/or modify it +;; under the terms of the GNU General Public License as published +;; by the Free Software Foundation; either version 3, or (at your +;; option) any later version. +;; +;; GCC is distributed in the hope that it will be useful, but WITHOUT +;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +;; License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . + +;; Sources: + +;; The POWER6 has 2 iu, 2 fpu, 2 lsu, and 1 bu/cru unit per engine +;; (2 engines per chip). The chip can issue up to 5 internal ops +;; per cycle. 
+ +(define_automaton "power6iu,power6lsu,power6fpu,power6bu") + +(define_cpu_unit "iu1_power6,iu2_power6" "power6iu") +(define_cpu_unit "lsu1_power6,lsu2_power6" "power6lsu") +(define_cpu_unit "bpu_power6" "power6bu") +(define_cpu_unit "fpu1_power6,fpu2_power6" "power6fpu") + +(define_reservation "LS2_power6" + "lsu1_power6+lsu2_power6") + +(define_reservation "FPU_power6" + "fpu1_power6|fpu2_power6") + +(define_reservation "BRU_power6" + "bpu_power6") + +(define_reservation "LSU_power6" + "lsu1_power6|lsu2_power6") + +(define_reservation "LSF_power6" + "(lsu1_power6+fpu1_power6)\ + |(lsu1_power6+fpu2_power6)\ + |(lsu2_power6+fpu1_power6)\ + |(lsu2_power6+fpu2_power6)") + +(define_reservation "LX2_power6" + "(iu1_power6+iu2_power6+lsu1_power6)\ + |(iu1_power6+iu2_power6+lsu2_power6)") + +(define_reservation "FX2_power6" + "iu1_power6+iu2_power6") + +(define_reservation "X2F_power6" + "(iu1_power6+iu2_power6+fpu1_power6)\ + |(iu1_power6+iu2_power6+fpu2_power6)") + +(define_reservation "BX2_power6" + "iu1_power6+iu2_power6+bpu_power6") + +(define_reservation "LSX_power6" + "(iu1_power6+lsu1_power6)\ + |(iu1_power6+lsu2_power6)\ + |(iu2_power6+lsu1_power6)\ + |(iu2_power6+lsu2_power6)") + +(define_reservation "FXU_power6" + "iu1_power6|iu2_power6") + +(define_reservation "XLF_power6" + "(iu1_power6+lsu1_power6+fpu1_power6)\ + |(iu1_power6+lsu1_power6+fpu2_power6)\ + |(iu1_power6+lsu2_power6+fpu1_power6)\ + |(iu1_power6+lsu2_power6+fpu2_power6)\ + |(iu2_power6+lsu1_power6+fpu1_power6)\ + |(iu2_power6+lsu1_power6+fpu2_power6)\ + |(iu2_power6+lsu2_power6+fpu1_power6)\ + |(iu2_power6+lsu2_power6+fpu2_power6)") + +(define_reservation "BRX_power6" + "(bpu_power6+iu1_power6)\ + |(bpu_power6+iu2_power6)") + +; Load/store + +; The default for a value written by a fixed point load +; that is read/written by a subsequent fixed point op. 
+(define_insn_reservation "power6-load" 2 ; fx + (and (eq_attr "type" "load") + (eq_attr "cpu" "power6")) + "LSU_power6") + +; define the bypass for the case where the value written +; by a fixed point load is used as the source value on +; a store. +(define_bypass 1 "power6-load,\ + power6-load-update,\ + power6-load-update-indexed" + "power6-store,\ + power6-store-update,\ + power6-store-update-indexed,\ + power6-fpstore,\ + power6-fpstore-update" + "store_data_bypass_p") + +(define_insn_reservation "power6-load-ext" 4 ; fx + (and (eq_attr "type" "load_ext") + (eq_attr "cpu" "power6")) + "LSU_power6") + +; define the bypass for the case where the value written +; by a fixed point load ext is used as the source value on +; a store. +(define_bypass 1 "power6-load-ext,\ + power6-load-ext-update,\ + power6-load-ext-update-indexed" + "power6-store,\ + power6-store-update,\ + power6-store-update-indexed,\ + power6-fpstore,\ + power6-fpstore-update" + "store_data_bypass_p") + +(define_insn_reservation "power6-load-update" 2 ; fx + (and (eq_attr "type" "load_u") + (eq_attr "cpu" "power6")) + "LSX_power6") + +(define_insn_reservation "power6-load-update-indexed" 2 ; fx + (and (eq_attr "type" "load_ux") + (eq_attr "cpu" "power6")) + "LSX_power6") + +(define_insn_reservation "power6-load-ext-update" 4 ; fx + (and (eq_attr "type" "load_ext_u") + (eq_attr "cpu" "power6")) + "LSX_power6") + +(define_insn_reservation "power6-load-ext-update-indexed" 4 ; fx + (and (eq_attr "type" "load_ext_ux") + (eq_attr "cpu" "power6")) + "LSX_power6") + +(define_insn_reservation "power6-fpload" 1 + (and (eq_attr "type" "fpload") + (eq_attr "cpu" "power6")) + "LSU_power6") + +(define_insn_reservation "power6-fpload-update" 1 + (and (eq_attr "type" "fpload_u,fpload_ux") + (eq_attr "cpu" "power6")) + "LSX_power6") + +(define_insn_reservation "power6-store" 14 + (and (eq_attr "type" "store") + (eq_attr "cpu" "power6")) + "LSU_power6") + +(define_insn_reservation "power6-store-update" 14 + (and 
(eq_attr "type" "store_u") + (eq_attr "cpu" "power6")) + "LSX_power6") + +(define_insn_reservation "power6-store-update-indexed" 14 + (and (eq_attr "type" "store_ux") + (eq_attr "cpu" "power6")) + "LX2_power6") + +(define_insn_reservation "power6-fpstore" 14 + (and (eq_attr "type" "fpstore") + (eq_attr "cpu" "power6")) + "LSF_power6") + +(define_insn_reservation "power6-fpstore-update" 14 + (and (eq_attr "type" "fpstore_u,fpstore_ux") + (eq_attr "cpu" "power6")) + "XLF_power6") + +(define_insn_reservation "power6-larx" 3 + (and (eq_attr "type" "load_l") + (eq_attr "cpu" "power6")) + "LS2_power6") + +(define_insn_reservation "power6-stcx" 10 ; best case + (and (eq_attr "type" "store_c") + (eq_attr "cpu" "power6")) + "LSX_power6") + +(define_insn_reservation "power6-sync" 11 ; N/A + (and (eq_attr "type" "sync") + (eq_attr "cpu" "power6")) + "LSU_power6") + +(define_insn_reservation "power6-integer" 1 + (and (eq_attr "type" "integer") + (eq_attr "cpu" "power6")) + "FXU_power6") + +(define_insn_reservation "power6-isel" 1 + (and (eq_attr "type" "isel") + (eq_attr "cpu" "power6")) + "FXU_power6") + +(define_insn_reservation "power6-exts" 1 + (and (eq_attr "type" "exts") + (eq_attr "cpu" "power6")) + "FXU_power6") + +(define_insn_reservation "power6-shift" 1 + (and (eq_attr "type" "shift") + (eq_attr "cpu" "power6")) + "FXU_power6") + +(define_insn_reservation "power6-insert" 1 + (and (eq_attr "type" "insert_word") + (eq_attr "cpu" "power6")) + "FX2_power6") + +(define_insn_reservation "power6-insert-dword" 1 + (and (eq_attr "type" "insert_dword") + (eq_attr "cpu" "power6")) + "FX2_power6") + +; define the bypass for the case where the value written +; by a fixed point op is used as the source value on a +; store. 
+(define_bypass 1 "power6-integer,\ + power6-exts,\ + power6-shift,\ + power6-insert,\ + power6-insert-dword" + "power6-store,\ + power6-store-update,\ + power6-store-update-indexed,\ + power6-fpstore,\ + power6-fpstore-update" + "store_data_bypass_p") + +(define_insn_reservation "power6-cntlz" 2 + (and (eq_attr "type" "cntlz") + (eq_attr "cpu" "power6")) + "FXU_power6") + +(define_bypass 1 "power6-cntlz" + "power6-store,\ + power6-store-update,\ + power6-store-update-indexed,\ + power6-fpstore,\ + power6-fpstore-update" + "store_data_bypass_p") + +(define_insn_reservation "power6-var-rotate" 4 + (and (eq_attr "type" "var_shift_rotate") + (eq_attr "cpu" "power6")) + "FXU_power6") + +(define_insn_reservation "power6-trap" 1 ; N/A + (and (eq_attr "type" "trap") + (eq_attr "cpu" "power6")) + "BRX_power6") + +(define_insn_reservation "power6-two" 1 + (and (eq_attr "type" "two") + (eq_attr "cpu" "power6")) + "(iu1_power6,iu1_power6)\ + |(iu1_power6+iu2_power6,nothing)\ + |(iu1_power6,iu2_power6)\ + |(iu2_power6,iu1_power6)\ + |(iu2_power6,iu2_power6)") + +(define_insn_reservation "power6-three" 1 + (and (eq_attr "type" "three") + (eq_attr "cpu" "power6")) + "(iu1_power6,iu1_power6,iu1_power6)\ + |(iu1_power6,iu1_power6,iu2_power6)\ + |(iu1_power6,iu2_power6,iu1_power6)\ + |(iu1_power6,iu2_power6,iu2_power6)\ + |(iu2_power6,iu1_power6,iu1_power6)\ + |(iu2_power6,iu1_power6,iu2_power6)\ + |(iu2_power6,iu2_power6,iu1_power6)\ + |(iu2_power6,iu2_power6,iu2_power6)\ + |(iu1_power6+iu2_power6,iu1_power6)\ + |(iu1_power6+iu2_power6,iu2_power6)\ + |(iu1_power6,iu1_power6+iu2_power6)\ + |(iu2_power6,iu1_power6+iu2_power6)") + +(define_insn_reservation "power6-cmp" 1 + (and (eq_attr "type" "cmp") + (eq_attr "cpu" "power6")) + "FXU_power6") + +(define_insn_reservation "power6-compare" 1 + (and (eq_attr "type" "compare") + (eq_attr "cpu" "power6")) + "FXU_power6") + +(define_insn_reservation "power6-fast-compare" 1 + (and (eq_attr "type" "fast_compare") + (eq_attr "cpu" "power6")) 
+ "FXU_power6") + +; define the bypass for the case where the value written +; by a fixed point rec form op is used as the source value +; on a store. +(define_bypass 1 "power6-compare,\ + power6-fast-compare" + "power6-store,\ + power6-store-update,\ + power6-store-update-indexed,\ + power6-fpstore,\ + power6-fpstore-update" + "store_data_bypass_p") + +(define_insn_reservation "power6-delayed-compare" 2 ; N/A + (and (eq_attr "type" "delayed_compare") + (eq_attr "cpu" "power6")) + "FXU_power6") + +(define_insn_reservation "power6-var-delayed-compare" 4 + (and (eq_attr "type" "var_delayed_compare") + (eq_attr "cpu" "power6")) + "FXU_power6") + +(define_insn_reservation "power6-lmul-cmp" 16 + (and (eq_attr "type" "lmul_compare") + (eq_attr "cpu" "power6")) + "(iu1_power6*16+iu2_power6*16+fpu1_power6*16)\ + |(iu1_power6*16+iu2_power6*16+fpu2_power6*16)"); + +(define_insn_reservation "power6-imul-cmp" 16 + (and (eq_attr "type" "imul_compare") + (eq_attr "cpu" "power6")) + "(iu1_power6*16+iu2_power6*16+fpu1_power6*16)\ + |(iu1_power6*16+iu2_power6*16+fpu2_power6*16)"); + +(define_insn_reservation "power6-lmul" 16 + (and (eq_attr "type" "lmul") + (eq_attr "cpu" "power6")) + "(iu1_power6*16+iu2_power6*16+fpu1_power6*16)\ + |(iu1_power6*16+iu2_power6*16+fpu2_power6*16)"); + +(define_insn_reservation "power6-imul" 16 + (and (eq_attr "type" "imul") + (eq_attr "cpu" "power6")) + "(iu1_power6*16+iu2_power6*16+fpu1_power6*16)\ + |(iu1_power6*16+iu2_power6*16+fpu2_power6*16)"); + +(define_insn_reservation "power6-imul3" 16 + (and (eq_attr "type" "imul2,imul3") + (eq_attr "cpu" "power6")) + "(iu1_power6*16+iu2_power6*16+fpu1_power6*16)\ + |(iu1_power6*16+iu2_power6*16+fpu2_power6*16)"); + +(define_bypass 9 "power6-imul,\ + power6-lmul,\ + power6-imul-cmp,\ + power6-lmul-cmp,\ + power6-imul3" + "power6-store,\ + power6-store-update,\ + power6-store-update-indexed,\ + power6-fpstore,\ + power6-fpstore-update" + "store_data_bypass_p") + +(define_insn_reservation "power6-idiv" 44 + 
(and (eq_attr "type" "idiv") + (eq_attr "cpu" "power6")) + "(iu1_power6*44+iu2_power6*44+fpu1_power6*44)\ + |(iu1_power6*44+iu2_power6*44+fpu2_power6*44)"); + +; The latency for this bypass is yet to be defined +;(define_bypass ? "power6-idiv" +; "power6-store,\ +; power6-store-update,\ +; power6-store-update-indexed,\ +; power6-fpstore,\ +; power6-fpstore-update" +; "store_data_bypass_p") + +(define_insn_reservation "power6-ldiv" 56 + (and (eq_attr "type" "ldiv") + (eq_attr "cpu" "power6")) + "(iu1_power6*56+iu2_power6*56+fpu1_power6*56)\ + |(iu1_power6*56+iu2_power6*56+fpu2_power6*56)"); + +; The latency for this bypass is yet to be defined +;(define_bypass ? "power6-ldiv" +; "power6-store,\ +; power6-store-update,\ +; power6-store-update-indexed,\ +; power6-fpstore,\ +; power6-fpstore-update" +; "store_data_bypass_p") + +(define_insn_reservation "power6-mtjmpr" 2 + (and (eq_attr "type" "mtjmpr,mfjmpr") + (eq_attr "cpu" "power6")) + "BX2_power6") + +(define_bypass 5 "power6-mtjmpr" "power6-branch") + +(define_insn_reservation "power6-branch" 2 + (and (eq_attr "type" "jmpreg,branch") + (eq_attr "cpu" "power6")) + "BRU_power6") + +(define_bypass 5 "power6-branch" "power6-mtjmpr") + +(define_insn_reservation "power6-crlogical" 3 + (and (eq_attr "type" "cr_logical") + (eq_attr "cpu" "power6")) + "BRU_power6") + +(define_bypass 3 "power6-crlogical" "power6-branch") + +(define_insn_reservation "power6-delayedcr" 3 + (and (eq_attr "type" "delayed_cr") + (eq_attr "cpu" "power6")) + "BRU_power6") + +(define_insn_reservation "power6-mfcr" 6 ; N/A + (and (eq_attr "type" "mfcr") + (eq_attr "cpu" "power6")) + "BX2_power6") + +; mfcrf (1 field) +(define_insn_reservation "power6-mfcrf" 3 ; N/A + (and (eq_attr "type" "mfcrf") + (eq_attr "cpu" "power6")) + "BX2_power6") ; + +; mtcrf (1 field) +(define_insn_reservation "power6-mtcr" 4 ; N/A + (and (eq_attr "type" "mtcr") + (eq_attr "cpu" "power6")) + "BX2_power6") + +(define_bypass 9 "power6-mtcr" "power6-branch") + 
+(define_insn_reservation "power6-fp" 6 + (and (eq_attr "type" "fp,dmul") + (eq_attr "cpu" "power6")) + "FPU_power6") + +; Any fp instruction that updates a CR has a latency +; of 6 to a dependent branch +(define_bypass 6 "power6-fp" "power6-branch") + +(define_bypass 1 "power6-fp" + "power6-fpstore,power6-fpstore-update" + "store_data_bypass_p") + +(define_insn_reservation "power6-fpcompare" 8 + (and (eq_attr "type" "fpcompare") + (eq_attr "cpu" "power6")) + "FPU_power6") + +(define_bypass 12 "power6-fpcompare" + "power6-branch,power6-crlogical") + +(define_insn_reservation "power6-sdiv" 26 + (and (eq_attr "type" "sdiv") + (eq_attr "cpu" "power6")) + "FPU_power6") + +(define_insn_reservation "power6-ddiv" 32 + (and (eq_attr "type" "ddiv") + (eq_attr "cpu" "power6")) + "FPU_power6") + +(define_insn_reservation "power6-sqrt" 30 + (and (eq_attr "type" "ssqrt") + (eq_attr "cpu" "power6")) + "FPU_power6") + +(define_insn_reservation "power6-dsqrt" 42 + (and (eq_attr "type" "dsqrt") + (eq_attr "cpu" "power6")) + "FPU_power6") + +(define_insn_reservation "power6-isync" 2 ; N/A + (and (eq_attr "type" "isync") + (eq_attr "cpu" "power6")) + "FXU_power6") + +(define_insn_reservation "power6-vecload" 1 + (and (eq_attr "type" "vecload") + (eq_attr "cpu" "power6")) + "LSU_power6") + +(define_insn_reservation "power6-vecstore" 1 + (and (eq_attr "type" "vecstore") + (eq_attr "cpu" "power6")) + "LSF_power6") + +(define_insn_reservation "power6-vecsimple" 3 + (and (eq_attr "type" "vecsimple") + (eq_attr "cpu" "power6")) + "FPU_power6") + +(define_bypass 6 "power6-vecsimple" "power6-veccomplex,\ + power6-vecperm") + +(define_bypass 5 "power6-vecsimple" "power6-vecfloat") + +(define_bypass 4 "power6-vecsimple" "power6-vecstore" ) + +(define_insn_reservation "power6-veccmp" 1 + (and (eq_attr "type" "veccmp") + (eq_attr "cpu" "power6")) + "FPU_power6") + +(define_bypass 10 "power6-veccmp" "power6-branch") + +(define_insn_reservation "power6-vecfloat" 7 + (and (eq_attr "type" 
"vecfloat") + (eq_attr "cpu" "power6")) + "FPU_power6") + +(define_bypass 10 "power6-vecfloat" "power6-vecsimple") + +(define_bypass 11 "power6-vecfloat" "power6-veccomplex,\ + power6-vecperm") + +(define_bypass 9 "power6-vecfloat" "power6-vecstore" ) + +(define_insn_reservation "power6-veccomplex" 7 + (and (eq_attr "type" "veccomplex") + (eq_attr "cpu" "power6")) + "FPU_power6") + +(define_bypass 10 "power6-veccomplex" "power6-vecsimple,\ + power6-vecfloat" ) + +(define_bypass 9 "power6-veccomplex" "power6-vecperm" ) + +(define_bypass 8 "power6-veccomplex" "power6-vecstore" ) + +(define_insn_reservation "power6-vecperm" 4 + (and (eq_attr "type" "vecperm") + (eq_attr "cpu" "power6")) + "FPU_power6") + +(define_bypass 7 "power6-vecperm" "power6-vecsimple,\ + power6-vecfloat" ) + +(define_bypass 6 "power6-vecperm" "power6-veccomplex" ) + +(define_bypass 5 "power6-vecperm" "power6-vecstore" ) + +(define_insn_reservation "power6-mftgpr" 8 + (and (eq_attr "type" "mftgpr") + (eq_attr "cpu" "power6")) + "X2F_power6") + +(define_insn_reservation "power6-mffgpr" 14 + (and (eq_attr "type" "mffgpr") + (eq_attr "cpu" "power6")) + "LX2_power6") + +(define_bypass 4 "power6-mftgpr" "power6-imul,\ + power6-lmul,\ + power6-imul-cmp,\ + power6-lmul-cmp,\ + power6-imul3,\ + power6-idiv,\ + power6-ldiv" ) diff --git a/gcc/config/rs6000/power7.md b/gcc/config/rs6000/power7.md new file mode 100644 index 000000000..148a7a52a --- /dev/null +++ b/gcc/config/rs6000/power7.md @@ -0,0 +1,318 @@ +;; Scheduling description for IBM POWER7 processor. +;; Copyright (C) 2009 Free Software Foundation, Inc. +;; +;; Contributed by Pat Haugen (pthaugen@us.ibm.com). + +;; This file is part of GCC. +;; +;; GCC is free software; you can redistribute it and/or modify it +;; under the terms of the GNU General Public License as published +;; by the Free Software Foundation; either version 3, or (at your +;; option) any later version. 
+;; +;; GCC is distributed in the hope that it will be useful, but WITHOUT +;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +;; License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . + +(define_automaton "power7iu,power7lsu,power7vsu,power7misc") + +(define_cpu_unit "iu1_power7,iu2_power7" "power7iu") +(define_cpu_unit "lsu1_power7,lsu2_power7" "power7lsu") +(define_cpu_unit "vsu1_power7,vsu2_power7" "power7vsu") +(define_cpu_unit "bpu_power7,cru_power7" "power7misc") +(define_cpu_unit "du1_power7,du2_power7,du3_power7,du4_power7,du5_power7" + "power7misc") + + +(define_reservation "DU_power7" + "du1_power7|du2_power7|du3_power7|du4_power7") + +(define_reservation "DU2F_power7" + "du1_power7+du2_power7") + +(define_reservation "DU4_power7" + "du1_power7+du2_power7+du3_power7+du4_power7") + +(define_reservation "FXU_power7" + "iu1_power7|iu2_power7") + +(define_reservation "VSU_power7" + "vsu1_power7|vsu2_power7") + +(define_reservation "LSU_power7" + "lsu1_power7|lsu2_power7") + + +; Dispatch slots are allocated in order conforming to program order. 
+(absence_set "du1_power7" "du2_power7,du3_power7,du4_power7,du5_power7") +(absence_set "du2_power7" "du3_power7,du4_power7,du5_power7") +(absence_set "du3_power7" "du4_power7,du5_power7") +(absence_set "du4_power7" "du5_power7") + + +; LS Unit +(define_insn_reservation "power7-load" 2 + (and (eq_attr "type" "load") + (eq_attr "cpu" "power7")) + "DU_power7,LSU_power7") + +(define_insn_reservation "power7-load-ext" 3 + (and (eq_attr "type" "load_ext") + (eq_attr "cpu" "power7")) + "DU2F_power7,LSU_power7,FXU_power7") + +(define_insn_reservation "power7-load-update" 2 + (and (eq_attr "type" "load_u") + (eq_attr "cpu" "power7")) + "DU2F_power7,LSU_power7+FXU_power7") + +(define_insn_reservation "power7-load-update-indexed" 3 + (and (eq_attr "type" "load_ux") + (eq_attr "cpu" "power7")) + "DU4_power7,FXU_power7,LSU_power7+FXU_power7") + +(define_insn_reservation "power7-load-ext-update" 4 + (and (eq_attr "type" "load_ext_u") + (eq_attr "cpu" "power7")) + "DU2F_power7,LSU_power7+FXU_power7,FXU_power7") + +(define_insn_reservation "power7-load-ext-update-indexed" 4 + (and (eq_attr "type" "load_ext_ux") + (eq_attr "cpu" "power7")) + "DU4_power7,FXU_power7,LSU_power7+FXU_power7,FXU_power7") + +(define_insn_reservation "power7-fpload" 3 + (and (eq_attr "type" "fpload") + (eq_attr "cpu" "power7")) + "DU_power7,LSU_power7") + +(define_insn_reservation "power7-fpload-update" 3 + (and (eq_attr "type" "fpload_u,fpload_ux") + (eq_attr "cpu" "power7")) + "DU2F_power7,LSU_power7+FXU_power7") + +(define_insn_reservation "power7-store" 6 ; store-forwarding latency + (and (eq_attr "type" "store") + (eq_attr "cpu" "power7")) + "DU_power7,LSU_power7+FXU_power7") + +(define_insn_reservation "power7-store-update" 6 + (and (eq_attr "type" "store_u") + (eq_attr "cpu" "power7")) + "DU2F_power7,LSU_power7+FXU_power7,FXU_power7") + +(define_insn_reservation "power7-store-update-indexed" 6 + (and (eq_attr "type" "store_ux") + (eq_attr "cpu" "power7")) + 
"DU4_power7,LSU_power7+FXU_power7,FXU_power7") + +(define_insn_reservation "power7-fpstore" 6 + (and (eq_attr "type" "fpstore") + (eq_attr "cpu" "power7")) + "DU_power7,LSU_power7+VSU_power7") + +(define_insn_reservation "power7-fpstore-update" 6 + (and (eq_attr "type" "fpstore_u,fpstore_ux") + (eq_attr "cpu" "power7")) + "DU_power7,LSU_power7+VSU_power7+FXU_power7") + +(define_insn_reservation "power7-larx" 3 + (and (eq_attr "type" "load_l") + (eq_attr "cpu" "power7")) + "DU4_power7,LSU_power7") + +(define_insn_reservation "power7-stcx" 10 + (and (eq_attr "type" "store_c") + (eq_attr "cpu" "power7")) + "DU4_power7,LSU_power7") + +(define_insn_reservation "power7-vecload" 3 + (and (eq_attr "type" "vecload") + (eq_attr "cpu" "power7")) + "DU_power7,LSU_power7") + +(define_insn_reservation "power7-vecstore" 6 + (and (eq_attr "type" "vecstore") + (eq_attr "cpu" "power7")) + "DU_power7,LSU_power7+VSU_power7") + +(define_insn_reservation "power7-sync" 11 + (and (eq_attr "type" "sync") + (eq_attr "cpu" "power7")) + "DU4_power7,LSU_power7") + + +; FX Unit +(define_insn_reservation "power7-integer" 1 + (and (eq_attr "type" "integer,insert_word,insert_dword,shift,trap,\ + var_shift_rotate,exts,isel") + (eq_attr "cpu" "power7")) + "DU_power7,FXU_power7") + +(define_insn_reservation "power7-cntlz" 2 + (and (eq_attr "type" "cntlz") + (eq_attr "cpu" "power7")) + "DU_power7,FXU_power7") + +(define_insn_reservation "power7-two" 2 + (and (eq_attr "type" "two") + (eq_attr "cpu" "power7")) + "DU_power7+DU_power7,FXU_power7,FXU_power7") + +(define_insn_reservation "power7-three" 3 + (and (eq_attr "type" "three") + (eq_attr "cpu" "power7")) + "DU_power7+DU_power7+DU_power7,FXU_power7,FXU_power7,FXU_power7") + +(define_insn_reservation "power7-cmp" 1 + (and (eq_attr "type" "cmp,fast_compare") + (eq_attr "cpu" "power7")) + "DU_power7,FXU_power7") + +(define_insn_reservation "power7-compare" 2 + (and (eq_attr "type" "compare,delayed_compare,var_delayed_compare") + (eq_attr "cpu" 
"power7")) + "DU2F_power7,FXU_power7,FXU_power7") + +(define_bypass 3 "power7-cmp,power7-compare" "power7-crlogical,power7-delayedcr") + +(define_insn_reservation "power7-mul" 4 + (and (eq_attr "type" "imul,imul2,imul3,lmul") + (eq_attr "cpu" "power7")) + "DU_power7,FXU_power7") + +(define_insn_reservation "power7-mul-compare" 5 + (and (eq_attr "type" "imul_compare,lmul_compare") + (eq_attr "cpu" "power7")) + "DU2F_power7,FXU_power7,nothing*3,FXU_power7") + +(define_insn_reservation "power7-idiv" 36 + (and (eq_attr "type" "idiv") + (eq_attr "cpu" "power7")) + "DU2F_power7,iu1_power7*36|iu2_power7*36") + +(define_insn_reservation "power7-ldiv" 68 + (and (eq_attr "type" "ldiv") + (eq_attr "cpu" "power7")) + "DU2F_power7,iu1_power7*68|iu2_power7*68") + +(define_insn_reservation "power7-isync" 1 ; + (and (eq_attr "type" "isync") + (eq_attr "cpu" "power7")) + "DU4_power7,FXU_power7") + + +; CR Unit +(define_insn_reservation "power7-mtjmpr" 4 + (and (eq_attr "type" "mtjmpr") + (eq_attr "cpu" "power7")) + "du1_power7,FXU_power7") + +(define_insn_reservation "power7-mfjmpr" 5 + (and (eq_attr "type" "mfjmpr") + (eq_attr "cpu" "power7")) + "du1_power7,cru_power7+FXU_power7") + +(define_insn_reservation "power7-crlogical" 3 + (and (eq_attr "type" "cr_logical") + (eq_attr "cpu" "power7")) + "du1_power7,cru_power7") + +(define_insn_reservation "power7-delayedcr" 3 + (and (eq_attr "type" "delayed_cr") + (eq_attr "cpu" "power7")) + "du1_power7,cru_power7") + +(define_insn_reservation "power7-mfcr" 6 + (and (eq_attr "type" "mfcr") + (eq_attr "cpu" "power7")) + "du1_power7,cru_power7") + +(define_insn_reservation "power7-mfcrf" 3 + (and (eq_attr "type" "mfcrf") + (eq_attr "cpu" "power7")) + "du1_power7,cru_power7") + +(define_insn_reservation "power7-mtcr" 3 + (and (eq_attr "type" "mtcr") + (eq_attr "cpu" "power7")) + "DU4_power7,cru_power7+FXU_power7") + + +; BR Unit +; Branches take dispatch Slot 4. 
The presence_sets prevent other insn from +; grabbing previous dispatch slots once this is assigned. +(define_insn_reservation "power7-branch" 3 + (and (eq_attr "type" "jmpreg,branch") + (eq_attr "cpu" "power7")) + "(du5_power7\ + |du4_power7+du5_power7\ + |du3_power7+du4_power7+du5_power7\ + |du2_power7+du3_power7+du4_power7+du5_power7\ + |du1_power7+du2_power7+du3_power7+du4_power7+du5_power7),bpu_power7") + + +; VS Unit (includes FP/VSX/VMX/DFP) +(define_insn_reservation "power7-fp" 6 + (and (eq_attr "type" "fp,dmul") + (eq_attr "cpu" "power7")) + "DU_power7,VSU_power7") + +(define_bypass 8 "power7-fp" "power7-branch") + +(define_insn_reservation "power7-fpcompare" 4 + (and (eq_attr "type" "fpcompare") + (eq_attr "cpu" "power7")) + "DU_power7,VSU_power7") + +(define_insn_reservation "power7-sdiv" 26 + (and (eq_attr "type" "sdiv") + (eq_attr "cpu" "power7")) + "DU_power7,VSU_power7") + +(define_insn_reservation "power7-ddiv" 32 + (and (eq_attr "type" "ddiv") + (eq_attr "cpu" "power7")) + "DU_power7,VSU_power7") + +(define_insn_reservation "power7-sqrt" 31 + (and (eq_attr "type" "ssqrt") + (eq_attr "cpu" "power7")) + "DU_power7,VSU_power7") + +(define_insn_reservation "power7-dsqrt" 43 + (and (eq_attr "type" "dsqrt") + (eq_attr "cpu" "power7")) + "DU_power7,VSU_power7") + +(define_insn_reservation "power7-vecsimple" 2 + (and (eq_attr "type" "vecsimple") + (eq_attr "cpu" "power7")) + "du1_power7,VSU_power7") + +(define_insn_reservation "power7-veccmp" 7 + (and (eq_attr "type" "veccmp") + (eq_attr "cpu" "power7")) + "du1_power7,VSU_power7") + +(define_insn_reservation "power7-vecfloat" 7 + (and (eq_attr "type" "vecfloat") + (eq_attr "cpu" "power7")) + "du1_power7,VSU_power7") + +(define_bypass 6 "power7-vecfloat" "power7-vecfloat") + +(define_insn_reservation "power7-veccomplex" 7 + (and (eq_attr "type" "veccomplex") + (eq_attr "cpu" "power7")) + "du1_power7,VSU_power7") + +(define_insn_reservation "power7-vecperm" 3 + (and (eq_attr "type" "vecperm") + (eq_attr 
"cpu" "power7")) + "du2_power7,VSU_power7") diff --git a/gcc/config/rs6000/ppc-asm.h b/gcc/config/rs6000/ppc-asm.h new file mode 100644 index 000000000..c963eb98a --- /dev/null +++ b/gcc/config/rs6000/ppc-asm.h @@ -0,0 +1,358 @@ +/* PowerPC asm definitions for GNU C. + +Copyright (C) 2002, 2003, 2008, 2009 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. 
*/ + +/* Under winnt, 1) gas supports the following as names and 2) in particular + defining "toc" breaks the FUNC_START macro as ".toc" becomes ".2" */ + +#define r0 0 +#define sp 1 +#define toc 2 +#define r3 3 +#define r4 4 +#define r5 5 +#define r6 6 +#define r7 7 +#define r8 8 +#define r9 9 +#define r10 10 +#define r11 11 +#define r12 12 +#define r13 13 +#define r14 14 +#define r15 15 +#define r16 16 +#define r17 17 +#define r18 18 +#define r19 19 +#define r20 20 +#define r21 21 +#define r22 22 +#define r23 23 +#define r24 24 +#define r25 25 +#define r26 26 +#define r27 27 +#define r28 28 +#define r29 29 +#define r30 30 +#define r31 31 + +#define cr0 0 +#define cr1 1 +#define cr2 2 +#define cr3 3 +#define cr4 4 +#define cr5 5 +#define cr6 6 +#define cr7 7 + +#define f0 0 +#define f1 1 +#define f2 2 +#define f3 3 +#define f4 4 +#define f5 5 +#define f6 6 +#define f7 7 +#define f8 8 +#define f9 9 +#define f10 10 +#define f11 11 +#define f12 12 +#define f13 13 +#define f14 14 +#define f15 15 +#define f16 16 +#define f17 17 +#define f18 18 +#define f19 19 +#define f20 20 +#define f21 21 +#define f22 22 +#define f23 23 +#define f24 24 +#define f25 25 +#define f26 26 +#define f27 27 +#define f28 28 +#define f29 29 +#define f30 30 +#define f31 31 + +#ifdef __VSX__ +#define f32 32 +#define f33 33 +#define f34 34 +#define f35 35 +#define f36 36 +#define f37 37 +#define f38 38 +#define f39 39 +#define f40 40 +#define f41 41 +#define f42 42 +#define f43 43 +#define f44 44 +#define f45 45 +#define f46 46 +#define f47 47 +#define f48 48 +#define f49 49 +#define f50 50 +#define f51 51 +#define f52 52 +#define f53 53 +#define f54 54 +#define f55 55 +#define f56 56 +#define f57 57 +#define f58 58 +#define f59 59 +#define f60 60 +#define f61 61 +#define f62 62 +#define f63 63 +#endif + +#ifdef __ALTIVEC__ +#define v0 0 +#define v1 1 +#define v2 2 +#define v3 3 +#define v4 4 +#define v5 5 +#define v6 6 +#define v7 7 +#define v8 8 +#define v9 9 +#define v10 10 +#define v11 11 
+#define v12 12 +#define v13 13 +#define v14 14 +#define v15 15 +#define v16 16 +#define v17 17 +#define v18 18 +#define v19 19 +#define v20 20 +#define v21 21 +#define v22 22 +#define v23 23 +#define v24 24 +#define v25 25 +#define v26 26 +#define v27 27 +#define v28 28 +#define v29 29 +#define v30 30 +#define v31 31 +#endif + +#ifdef __VSX__ +#define vs0 0 +#define vs1 1 +#define vs2 2 +#define vs3 3 +#define vs4 4 +#define vs5 5 +#define vs6 6 +#define vs7 7 +#define vs8 8 +#define vs9 9 +#define vs10 10 +#define vs11 11 +#define vs12 12 +#define vs13 13 +#define vs14 14 +#define vs15 15 +#define vs16 16 +#define vs17 17 +#define vs18 18 +#define vs19 19 +#define vs20 20 +#define vs21 21 +#define vs22 22 +#define vs23 23 +#define vs24 24 +#define vs25 25 +#define vs26 26 +#define vs27 27 +#define vs28 28 +#define vs29 29 +#define vs30 30 +#define vs31 31 +#define vs32 32 +#define vs33 33 +#define vs34 34 +#define vs35 35 +#define vs36 36 +#define vs37 37 +#define vs38 38 +#define vs39 39 +#define vs40 40 +#define vs41 41 +#define vs42 42 +#define vs43 43 +#define vs44 44 +#define vs45 45 +#define vs46 46 +#define vs47 47 +#define vs48 48 +#define vs49 49 +#define vs50 50 +#define vs51 51 +#define vs52 52 +#define vs53 53 +#define vs54 54 +#define vs55 55 +#define vs56 56 +#define vs57 57 +#define vs58 58 +#define vs59 59 +#define vs60 60 +#define vs61 61 +#define vs62 62 +#define vs63 63 +#endif + +/* + * Macros to glue together two tokens. + */ + +#ifdef __STDC__ +#define XGLUE(a,b) a##b +#else +#define XGLUE(a,b) a/**/b +#endif + +#define GLUE(a,b) XGLUE(a,b) + +/* + * Macros to begin and end a function written in assembler. If -mcall-aixdesc + * or -mcall-nt, create a function descriptor with the given name, and create + * the real function with one or two leading periods respectively. 
+ */ + +#if defined (__powerpc64__) +#define FUNC_NAME(name) GLUE(.,name) +#define JUMP_TARGET(name) FUNC_NAME(name) +#define FUNC_START(name) \ + .section ".opd","aw"; \ +name: \ + .quad GLUE(.,name); \ + .quad .TOC.@tocbase; \ + .quad 0; \ + .previous; \ + .type GLUE(.,name),@function; \ + .globl name; \ + .globl GLUE(.,name); \ +GLUE(.,name): + +#define HIDDEN_FUNC(name) \ + FUNC_START(name) \ + .hidden name; \ + .hidden GLUE(.,name); + +#define FUNC_END(name) \ +GLUE(.L,name): \ + .size GLUE(.,name),GLUE(.L,name)-GLUE(.,name) + +#elif defined(_CALL_AIXDESC) + +#ifdef _RELOCATABLE +#define DESC_SECTION ".got2" +#else +#define DESC_SECTION ".got1" +#endif + +#define FUNC_NAME(name) GLUE(.,name) +#define JUMP_TARGET(name) FUNC_NAME(name) +#define FUNC_START(name) \ + .section DESC_SECTION,"aw"; \ +name: \ + .long GLUE(.,name); \ + .long _GLOBAL_OFFSET_TABLE_; \ + .long 0; \ + .previous; \ + .type GLUE(.,name),@function; \ + .globl name; \ + .globl GLUE(.,name); \ +GLUE(.,name): + +#define HIDDEN_FUNC(name) \ + FUNC_START(name) \ + .hidden name; \ + .hidden GLUE(.,name); + +#define FUNC_END(name) \ +GLUE(.L,name): \ + .size GLUE(.,name),GLUE(.L,name)-GLUE(.,name) + +#else + +#define FUNC_NAME(name) GLUE(__USER_LABEL_PREFIX__,name) +#if defined __PIC__ || defined __pic__ +#define JUMP_TARGET(name) FUNC_NAME(name@plt) +#else +#define JUMP_TARGET(name) FUNC_NAME(name) +#endif +#define FUNC_START(name) \ + .type FUNC_NAME(name),@function; \ + .globl FUNC_NAME(name); \ +FUNC_NAME(name): + +#define HIDDEN_FUNC(name) \ + FUNC_START(name) \ + .hidden FUNC_NAME(name); + +#define FUNC_END(name) \ +GLUE(.L,name): \ + .size FUNC_NAME(name),GLUE(.L,name)-FUNC_NAME(name) +#endif + +#ifdef IN_GCC +/* For HAVE_GAS_CFI_DIRECTIVE. 
*/ +#include "auto-host.h" + +#ifdef HAVE_GAS_CFI_DIRECTIVE +# define CFI_STARTPROC .cfi_startproc +# define CFI_ENDPROC .cfi_endproc +# define CFI_OFFSET(reg, off) .cfi_offset reg, off +# define CFI_DEF_CFA_REGISTER(reg) .cfi_def_cfa_register reg +# define CFI_RESTORE(reg) .cfi_restore reg +#else +# define CFI_STARTPROC +# define CFI_ENDPROC +# define CFI_OFFSET(reg, off) +# define CFI_DEF_CFA_REGISTER(reg) +# define CFI_RESTORE(reg) +#endif +#endif + +#if defined __linux__ && !defined __powerpc64__ + .section .note.GNU-stack + .previous +#endif diff --git a/gcc/config/rs6000/ppc64-fp.c b/gcc/config/rs6000/ppc64-fp.c new file mode 100644 index 000000000..62861ee16 --- /dev/null +++ b/gcc/config/rs6000/ppc64-fp.c @@ -0,0 +1,239 @@ +/* Functions needed for soft-float on powerpc64-linux, copied from + libgcc2.c with macros expanded to force the use of specific types. + + Copyright (C) 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, + 2000, 2001, 2002, 2003, 2004, 2006, 2009 Free Software Foundation, + Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. 
*/ + +#if defined(__powerpc64__) || defined (__64BIT__) || defined(__ppc64__) +#define TMODES +#include "config/fp-bit.h" + +extern DItype __fixtfdi (TFtype); +extern DItype __fixdfdi (DFtype); +extern DItype __fixsfdi (SFtype); +extern USItype __fixunsdfsi (DFtype); +extern USItype __fixunssfsi (SFtype); +extern TFtype __floatditf (DItype); +extern TFtype __floatunditf (UDItype); +extern DFtype __floatdidf (DItype); +extern DFtype __floatundidf (UDItype); +extern SFtype __floatdisf (DItype); +extern SFtype __floatundisf (UDItype); +extern DItype __fixunstfdi (TFtype); + +static DItype local_fixunssfdi (SFtype); +static DItype local_fixunsdfdi (DFtype); + +DItype +__fixtfdi (TFtype a) +{ + if (a < 0) + return - __fixunstfdi (-a); + return __fixunstfdi (a); +} + +DItype +__fixdfdi (DFtype a) +{ + if (a < 0) + return - local_fixunsdfdi (-a); + return local_fixunsdfdi (a); +} + +DItype +__fixsfdi (SFtype a) +{ + if (a < 0) + return - local_fixunssfdi (-a); + return local_fixunssfdi (a); +} + +USItype +__fixunsdfsi (DFtype a) +{ + if (a >= - (DFtype) (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1)) + return (SItype) (a + (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1)) + - (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1); + return (SItype) a; +} + +USItype +__fixunssfsi (SFtype a) +{ + if (a >= - (SFtype) (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1)) + return (SItype) (a + (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1)) + - (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1); + return (SItype) a; +} + +TFtype +__floatditf (DItype u) +{ + DFtype dh, dl; + + dh = (SItype) (u >> (sizeof (SItype) * 8)); + dh *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1)); + dl = (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1)); + + return (TFtype) dh + (TFtype) dl; +} + +TFtype +__floatunditf (UDItype u) +{ + DFtype dh, dl; + + dh = (USItype) (u >> (sizeof (SItype) * 8)); + dh *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1)); 
+ dl = (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1)); + + return (TFtype) dh + (TFtype) dl; +} + +DFtype +__floatdidf (DItype u) +{ + DFtype d; + + d = (SItype) (u >> (sizeof (SItype) * 8)); + d *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1)); + d += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1)); + + return d; +} + +DFtype +__floatundidf (UDItype u) +{ + DFtype d; + + d = (USItype) (u >> (sizeof (SItype) * 8)); + d *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1)); + d += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1)); + + return d; +} + +SFtype +__floatdisf (DItype u) +{ + DFtype f; + + if (53 < (sizeof (DItype) * 8) + && 53 > ((sizeof (DItype) * 8) - 53 + 24)) + { + if (! (- ((DItype) 1 << 53) < u + && u < ((DItype) 1 << 53))) + { + if ((UDItype) u & (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1)) + { + u &= ~ (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1); + u |= ((UDItype) 1 << ((sizeof (DItype) * 8) - 53)); + } + } + } + f = (SItype) (u >> (sizeof (SItype) * 8)); + f *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1)); + f += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1)); + + return (SFtype) f; +} + +SFtype +__floatundisf (UDItype u) +{ + DFtype f; + + if (53 < (sizeof (DItype) * 8) + && 53 > ((sizeof (DItype) * 8) - 53 + 24)) + { + if (u >= ((UDItype) 1 << 53)) + { + if ((UDItype) u & (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1)) + { + u &= ~ (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1); + u |= ((UDItype) 1 << ((sizeof (DItype) * 8) - 53)); + } + } + } + f = (USItype) (u >> (sizeof (SItype) * 8)); + f *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1)); + f += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1)); + + return (SFtype) f; +} + +DItype +__fixunstfdi (TFtype a) +{ + if (a < 0) + return 0; + + /* Compute high word of result, as a flonum. 
*/ + const TFtype b = (a / (((UDItype) 1) << (sizeof (SItype) * 8))); + /* Convert that to fixed (but not to DItype!), + and shift it into the high word. */ + UDItype v = (USItype) b; + v <<= (sizeof (SItype) * 8); + /* Remove high part from the TFtype, leaving the low part as flonum. */ + a -= (TFtype) v; + /* Convert that to fixed (but not to DItype!) and add it in. + Sometimes A comes out negative. This is significant, since + A has more bits than a long int does. */ + if (a < 0) + v -= (USItype) (-a); + else + v += (USItype) a; + return v; +} + +/* This version is needed to prevent recursion; fixunsdfdi in libgcc + calls fixdfdi, which in turn calls calls fixunsdfdi. */ + +static DItype +local_fixunsdfdi (DFtype a) +{ + USItype hi, lo; + + hi = a / (((UDItype) 1) << (sizeof (SItype) * 8)); + lo = (a - ((DFtype) hi) * (((UDItype) 1) << (sizeof (SItype) * 8))); + return ((UDItype) hi << (sizeof (SItype) * 8)) | lo; +} + +/* This version is needed to prevent recursion; fixunssfdi in libgcc + calls fixsfdi, which in turn calls calls fixunssfdi. */ + +static DItype +local_fixunssfdi (SFtype original_a) +{ + DFtype a = original_a; + USItype hi, lo; + + hi = a / (((UDItype) 1) << (sizeof (SItype) * 8)); + lo = (a - ((DFtype) hi) * (((UDItype) 1) << (sizeof (SItype) * 8))); + return ((UDItype) hi << (sizeof (SItype) * 8)) | lo; +} + +#endif /* __powerpc64__ */ diff --git a/gcc/config/rs6000/ppu_intrinsics.h b/gcc/config/rs6000/ppu_intrinsics.h new file mode 100644 index 000000000..0950f33aa --- /dev/null +++ b/gcc/config/rs6000/ppu_intrinsics.h @@ -0,0 +1,727 @@ +/* PPU intrinsics as defined by the C/C++ Language extension for Cell BEA. + Copyright (C) 2007, 2009 Free Software Foundation, Inc. + + This file is free software; you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your option) + any later version. 
+ + This file is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* TODO: + misc ops (traps) + supervisor/hypervisor mode ops. */ + +#ifndef _PPU_INTRINSICS_H +#define _PPU_INTRINSICS_H + +#if !defined(__PPU__) && !defined(__ppc__) && !defined(__ppc64__) \ + && !defined(__GNUC__) + #error ppu_intrinsics.h included on wrong platform/compiler +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * unsigned int __cntlzw(unsigned int) + * unsigned int __cntlzd(unsigned long long) + * int __mulhw(int, int) + * unsigned int __mulhwu(unsigned int, unsigned int) + * long long __mulhd(long long, long long) + * unsigned long long __mulhdu(unsigned long long, unsigned long long) + * + * void __sync(void) + * void __isync(void) + * void __lwsync(void) + * void __eieio(void) + * + * void __nop(void) + * void __cctpl(void) + * void __cctpm(void) + * void __cctph(void) + * void __db8cyc(void) + * void __db10cyc(void) + * void __db12cyc(void) + * void __db16cyc(void) + * + * void __mtspr(unsigned int spr, unsigned long long value) + * unsigned long long __mfspr(unsigned int spr) + * unsigned long long __mftb(void) + * + * void __icbi(void *base) + * void __dcbi(void *base) + * + * void __dcbf(void *base) + * void __dcbz(void *base) + * void __dcbst(void *base) + * void __dcbtst(void *base) + * void __dcbt(void *base) + * void __dcbt_TH1000(void *EATRUNC, bool D, bool UG, int ID) + * void 
__dcbt_TH1010(bool GO, int S, int UNITCNT, bool T, bool U, int ID) + * + * unsigned __lwarx(void *base) + * unsigned long long __ldarx(void *base) + * bool __stwcx(void *base, unsigned value) + * bool __stdcx(void *base, unsigned long long value) + * + * unsigned short __lhbrx(void *base) + * unsigned int __lwbrx(void *base) + * unsigned long long __ldbrx(void *base) + * void __sthbrx(void *base, unsigned short value) + * void __stwbrx(void *base, unsigned int value) + * void __stdbrx(void *base, unsigned long long value) + * + * double __fabs(double x) + * float __fabsf(float x) + * double __fnabs(double x) + * float __fnabsf(float x) + * double __fmadd(double x, double y, double z) + * double __fmsub(double x, double y, double z) + * double __fnmadd(double x, double y, double z) + * double __fnmsub(double x, double y, double z) + * float __fmadds(float x, float y, float z) + * float __fmsubs(float x, float y, float z) + * float __fnmadds(float x, float y, float z) + * float __fnmsubs(float x, float y, float z) + * double __fsel(double x, double y, double z) + * float __fsels(float x, float y, float z) + * double __frsqrte(double x) + * float __fres(float x) + * double __fsqrt(double x) + * float __fsqrts(float x) + * long long __fctid(double x) + * long long __fctiw(double x) + * double __fcfid(long long x) + * double __mffs(void) + * void __mtfsf(int mask, double value) + * void __mtfsfi(int bits, int field) + * void __mtfsb0(int) + * void __mtfsb1(int) + * double __setflm(double) + * + * dcbt intrinsics + * void __protected_unlimited_stream_set (unsigned int direction, const void *add, unsigned int ID) + * void __protected_stream_set (unsigned int direction, const void *add, unsigned int ID) + * void __protected_stream_stop_all (void) + * void __protected_stream_stop (unsigned int ID) + * void __protected_stream_count (unsigned int unit_cnt, unsigned int ID) + * void __protected_stream_go (void) + */ + +typedef int __V4SI __attribute__((vector_size(16))); + 
+#define __cntlzw(v) __builtin_clz(v) +#define __cntlzd(v) __builtin_clzll(v) + +#define __mulhw(a,b) __extension__ \ + ({int result; \ + __asm__ ("mulhw %0,%1,%2" \ + : "=r" (result) \ + : "r" ((int) (a)), \ + "r" ((int) (b))); \ + result; }) + +#define __mulhwu(a,b) __extension__ \ + ({unsigned int result; \ + __asm__ ("mulhwu %0,%1,%2" \ + : "=r" (result) \ + : "r" ((unsigned int) (a)), \ + "r" ((unsigned int) (b))); \ + result; }) + +#ifdef __powerpc64__ +#define __mulhd(a,b) __extension__ \ + ({ long long result; \ + __asm__ ("mulhd %0,%1,%2" \ + : "=r" (result) \ + : "r" ((long long) (a)), \ + "r" ((long long) (b))); \ + result; }) + +#define __mulhdu(a,b) __extension__ \ + ({unsigned long long result; \ + __asm__ ("mulhdu %0,%1,%2" \ + : "=r" (result) \ + : "r" ((unsigned long long) (a)), \ + "r" ((unsigned long long) (b))); \ + result; }) +#endif /* __powerpc64__ */ + +#define __sync() __asm__ volatile ("sync" : : : "memory") +#define __isync() __asm__ volatile ("isync" : : : "memory") +#define __lwsync() __asm__ volatile ("lwsync" : : : "memory") +#define __eieio() __asm__ volatile ("eieio" : : : "memory") + +#define __nop() __asm__ volatile ("ori 0,0,0" : : : "memory") +#define __cctpl() __asm__ volatile ("or 1,1,1" : : : "memory") +#define __cctpm() __asm__ volatile ("or 2,2,2" : : : "memory") +#define __cctph() __asm__ volatile ("or 3,3,3" : : : "memory") +#define __db8cyc() __asm__ volatile ("or 28,28,28" : : : "memory") +#define __db10cyc() __asm__ volatile ("or 29,29,29" : : : "memory") +#define __db12cyc() __asm__ volatile ("or 30,30,30" : : : "memory") +#define __db16cyc() __asm__ volatile ("or 31,31,31" : : : "memory") + +#ifdef __powerpc64__ +#define __mtspr(spr, value) \ + __asm__ volatile ("mtspr %0,%1" : : "n" (spr), "r" (value)) + +#define __mfspr(spr) __extension__ \ + ({ unsigned long long result; \ + __asm__ volatile ("mfspr %0,%1" : "=r" (result) : "n" (spr)); \ + result; }) +#endif /* __powerpc64__ */ + +#ifdef __powerpc64__ +/* Work 
around the hardware bug in the current Cell implementation. */ +#define __mftb() __extension__ \ + ({ unsigned long long result; \ + __asm__ volatile ("1: mftb %[current_tb]\n" \ + "\tcmpwi 7, %[current_tb], 0\n" \ + "\tbeq- 7, 1b" \ + : [current_tb] "=r" (result): \ + :"cr7"); \ + result; }) +#else +#define __mftb() __extension__ \ + ({ unsigned long long result; \ + unsigned long t; \ + __asm__ volatile ("1:\n" \ + "\tmftbu %0\n" \ + "\tmftb %L0\n" \ + "\tmftbu %1\n" \ + "\tcmpw %0,%1\n" \ + "\tbne 1b" \ + : "=r" (result), "=r" (t)); \ + result; }) +#endif /* __powerpc64__ */ + +#define __dcbf(base) \ + __asm__ volatile ("dcbf %y0" : "=Z" (*(__V4SI*) (base)) : : "memory") + +#define __dcbz(base) \ + __asm__ volatile ("dcbz %y0" : "=Z" (*(__V4SI*) (base)) : : "memory") + +#define __dcbst(base) \ + __asm__ volatile ("dcbst %y0" : "=Z" (*(__V4SI*) (base)) : : "memory") + +#define __dcbtst(base) \ + __asm__ volatile ("dcbtst %y0" : "=Z" (*(__V4SI*) (base)) : : "memory") + +#define __dcbt(base) \ + __asm__ volatile ("dcbt %y0" : "=Z" (*(__V4SI*) (base)) : : "memory") + +#define __icbi(base) \ + __asm__ volatile ("icbi %y0" : "=Z" (*(__V4SI*) (base)) : : "memory") + +#define __dcbt_TH1000(EATRUNC, D, UG, ID) \ + __asm__ volatile ("dcbt %y0,8" \ + : "=Z" (*(__V4SI*) (__SIZE_TYPE__)((((__SIZE_TYPE__) (EATRUNC)) & ~0x7F) \ + | ((((D) & 1) << 6) \ + | (((UG) & 1) << 5) \ + | ((ID) & 0xF)))) : : "memory") + +#define __dcbt_TH1010(GO, S, UNITCNT, T, U, ID) \ + __asm__ volatile ("dcbt %y0,10" \ + : "=Z" (*(__V4SI*) (__SIZE_TYPE__)((((__SIZE_TYPE__) (GO) & 1) << 31) \ + | (((S) & 0x3) << 29) \ + | (((UNITCNT) & 0x3FF) << 7) \ + | (((T) & 1) << 6) \ + | (((U) & 1) << 5) \ + | ((ID) & 0xF))) : : "memory") + +#define __protected_unlimited_stream_set(DIRECTION, ADDR, ID) \ + __dcbt_TH1000 ((ADDR), (DIRECTION)>>1, 1, (ID)) + +#define __protected_stream_set(DIRECTION, ADDR, ID) \ + __dcbt_TH1000 ((ADDR), (DIRECTION)>>1, 0, (ID)) + +#define __protected_stream_stop_all() \ + 
__dcbt_TH1010 (0, 3, 0, 0, 0, 0) + +#define __protected_stream_stop(ID) \ + __dcbt_TH1010 (0, 2, 0, 0, 0, (ID)) + +#define __protected_stream_count(COUNT, ID) \ + __dcbt_TH1010 (0, 0, (COUNT), 0, 0, (ID)) + +#define __protected_stream_go() \ + __dcbt_TH1010 (1, 0, 0, 0, 0, 0) + +#define __lhbrx(base) __extension__ \ + ({unsigned short result; \ + typedef struct {char a[2];} halfwordsize; \ + halfwordsize *ptrp = (halfwordsize*)(void*)(base); \ + __asm__ ("lhbrx %0,%y1" \ + : "=r" (result) \ + : "Z" (*ptrp)); \ + result; }) + +#define __lwbrx(base) __extension__ \ + ({unsigned int result; \ + typedef struct {char a[4];} wordsize; \ + wordsize *ptrp = (wordsize*)(void*)(base); \ + __asm__ ("lwbrx %0,%y1" \ + : "=r" (result) \ + : "Z" (*ptrp)); \ + result; }) + + +#ifdef __powerpc64__ +#define __ldbrx(base) __extension__ \ + ({unsigned long long result; \ + typedef struct {char a[8];} doublewordsize; \ + doublewordsize *ptrp = (doublewordsize*)(void*)(base); \ + __asm__ ("ldbrx %0,%y1" \ + : "=r" (result) \ + : "Z" (*ptrp)); \ + result; }) +#else +#define __ldbrx(base) __extension__ \ + ({unsigned long long result; \ + typedef struct {char a[8];} doublewordsize; \ + doublewordsize *ptrp = (doublewordsize*)(void*)(base); \ + __asm__ ("lwbrx %L0,%y1\n" \ + "\tlwbrx %0,%y2" \ + : "=&r" (result) \ + : "Z" (*ptrp), "Z" (*((char *) ptrp + 4))); \ + result; }) +#endif /* __powerpc64__ */ + + +#define __sthbrx(base, value) do { \ + typedef struct {char a[2];} halfwordsize; \ + halfwordsize *ptrp = (halfwordsize*)(void*)(base); \ + __asm__ ("sthbrx %1,%y0" \ + : "=Z" (*ptrp) \ + : "r" (value)); \ + } while (0) + +#define __stwbrx(base, value) do { \ + typedef struct {char a[4];} wordsize; \ + wordsize *ptrp = (wordsize*)(void*)(base); \ + __asm__ ("stwbrx %1,%y0" \ + : "=Z" (*ptrp) \ + : "r" (value)); \ + } while (0) + +#ifdef __powerpc64__ +#define __stdbrx(base, value) do { \ + typedef struct {char a[8];} doublewordsize; \ + doublewordsize *ptrp = 
(doublewordsize*)(void*)(base); \ + __asm__ ("stdbrx %1,%y0" \ + : "=Z" (*ptrp) \ + : "r" (value)); \ + } while (0) +#else +#define __stdbrx(base, value) do { \ + typedef struct {char a[8];} doublewordsize; \ + doublewordsize *ptrp = (doublewordsize*)(void*)(base); \ + __asm__ ("stwbrx %L2,%y0\n" \ + "\tstwbrx %2,%y1" \ + : "=Z" (*ptrp), "=Z" (*((char *) ptrp + 4)) \ + : "r" (value)); \ + } while (0) +#endif /* __powerpc64__ */ + + +#define __lwarx(base) __extension__ \ + ({unsigned int result; \ + typedef struct {char a[4];} wordsize; \ + wordsize *ptrp = (wordsize*)(void*)(base); \ + __asm__ volatile ("lwarx %0,%y1" \ + : "=r" (result) \ + : "Z" (*ptrp)); \ + result; }) + +#ifdef __powerpc64__ +#define __ldarx(base) __extension__ \ + ({unsigned long long result; \ + typedef struct {char a[8];} doublewordsize; \ + doublewordsize *ptrp = (doublewordsize*)(void*)(base); \ + __asm__ volatile ("ldarx %0,%y1" \ + : "=r" (result) \ + : "Z" (*ptrp)); \ + result; }) +#endif /* __powerpc64__ */ + +#define __stwcx(base, value) __extension__ \ + ({unsigned int result; \ + typedef struct {char a[4];} wordsize; \ + wordsize *ptrp = (wordsize*)(void*)(base); \ + __asm__ volatile ("stwcx. %2,%y1\n" \ + "\tmfocrf %0,0x80" \ + : "=r" (result), \ + "=Z" (*ptrp) \ + : "r" (value) : "cr0"); \ + ((result & 0x20000000) >> 29); }) + + +#ifdef __powerpc64__ +#define __stdcx(base, value) __extension__ \ + ({unsigned long long result; \ + typedef struct {char a[8];} doublewordsize; \ + doublewordsize *ptrp = (doublewordsize*)(void*)(base); \ + __asm__ volatile ("stdcx. 
%2,%y1\n" \ + "\tmfocrf %0,0x80" \ + : "=r" (result), \ + "=Z" (*ptrp) \ + : "r" (value) : "cr0"); \ + ((result & 0x20000000) >> 29); }) +#endif /* __powerpc64__ */ + +#define __mffs() __extension__ \ + ({double result; \ + __asm__ volatile ("mffs %0" : "=d" (result)); \ + result; }) + +#define __mtfsf(mask,value) \ + __asm__ volatile ("mtfsf %0,%1" : : "n" (mask), "d" ((double) (value))) + +#define __mtfsfi(bits,field) \ + __asm__ volatile ("mtfsfi %0,%1" : : "n" (bits), "n" (field)) + +#define __mtfsb0(bit) __asm__ volatile ("mtfsb0 %0" : : "n" (bit)) +#define __mtfsb1(bit) __asm__ volatile ("mtfsb1 %0" : : "n" (bit)) + +#define __setflm(v) __extension__ \ + ({double result; \ + __asm__ volatile ("mffs %0\n\tmtfsf 255,%1" \ + : "=&d" (result) \ + : "d" ((double) (v))); \ + result; }) + +/* __builtin_fabs may perform unnecessary rounding. */ + +/* Rename __fabs and __fabsf to work around internal prototypes defined + in bits/mathcalls.h with some glibc versions. */ +#define __fabs __ppu_fabs +#define __fabsf __ppu_fabsf + +static __inline__ double __fabs(double x) __attribute__((always_inline)); +static __inline__ double +__fabs(double x) +{ + double r; + __asm__("fabs %0,%1" : "=d"(r) : "d"(x)); + return r; +} + +static __inline__ float __fabsf(float x) __attribute__((always_inline)); +static __inline__ float +__fabsf(float x) +{ + float r; + __asm__("fabs %0,%1" : "=f"(r) : "f"(x)); + return r; +} + +static __inline__ double __fnabs(double x) __attribute__((always_inline)); +static __inline__ double +__fnabs(double x) +{ + double r; + __asm__("fnabs %0,%1" : "=d"(r) : "d"(x)); + return r; +} + +static __inline__ float __fnabsf(float x) __attribute__((always_inline)); +static __inline__ float +__fnabsf(float x) +{ + float r; + __asm__("fnabs %0,%1" : "=f"(r) : "f"(x)); + return r; +} + +static __inline__ double __fmadd(double x, double y, double z) + __attribute__((always_inline)); +static __inline__ double +__fmadd(double x, double y, double z) +{ + double r; + 
__asm__("fmadd %0,%1,%2,%3" : "=d"(r) : "d"(x),"d"(y),"d"(z)); + return r; +} + +static __inline__ double __fmsub(double x, double y, double z) + __attribute__((always_inline)); +static __inline__ double +__fmsub(double x, double y, double z) +{ + double r; + __asm__("fmsub %0,%1,%2,%3" : "=d"(r) : "d"(x),"d"(y),"d"(z)); + return r; +} + +static __inline__ double __fnmadd(double x, double y, double z) + __attribute__((always_inline)); +static __inline__ double +__fnmadd(double x, double y, double z) +{ + double r; + __asm__("fnmadd %0,%1,%2,%3" : "=d"(r) : "d"(x),"d"(y),"d"(z)); + return r; +} + +static __inline__ double __fnmsub(double x, double y, double z) + __attribute__((always_inline)); +static __inline__ double +__fnmsub(double x, double y, double z) +{ + double r; + __asm__("fnmsub %0,%1,%2,%3" : "=d"(r) : "d"(x),"d"(y),"d"(z)); + return r; +} + +static __inline__ float __fmadds(float x, float y, float z) + __attribute__((always_inline)); +static __inline__ float +__fmadds(float x, float y, float z) +{ + float r; + __asm__("fmadds %0,%1,%2,%3" : "=f"(r) : "f"(x),"f"(y),"f"(z)); + return r; +} + +static __inline__ float __fmsubs(float x, float y, float z) + __attribute__((always_inline)); +static __inline__ float +__fmsubs(float x, float y, float z) +{ + float r; + __asm__("fmsubs %0,%1,%2,%3" : "=f"(r) : "f"(x),"f"(y),"f"(z)); + return r; +} + +static __inline__ float __fnmadds(float x, float y, float z) + __attribute__((always_inline)); +static __inline__ float +__fnmadds(float x, float y, float z) +{ + float r; + __asm__("fnmadds %0,%1,%2,%3" : "=f"(r) : "f"(x),"f"(y),"f"(z)); + return r; +} + +static __inline__ float __fnmsubs(float x, float y, float z) + __attribute__((always_inline)); +static __inline__ float +__fnmsubs(float x, float y, float z) +{ + float r; + __asm__("fnmsubs %0,%1,%2,%3" : "=f"(r) : "f"(x),"f"(y),"f"(z)); + return r; +} + +static __inline__ double __fsel(double x, double y, double z) + __attribute__((always_inline)); +static 
__inline__ double +__fsel(double x, double y, double z) +{ + double r; + __asm__("fsel %0,%1,%2,%3" : "=d"(r) : "d"(x),"d"(y),"d"(z)); + return r; +} + +static __inline__ float __fsels(float x, float y, float z) + __attribute__((always_inline)); +static __inline__ float +__fsels(float x, float y, float z) +{ + float r; + __asm__("fsel %0,%1,%2,%3" : "=f"(r) : "f"(x),"f"(y),"f"(z)); + return r; +} + +static __inline__ double __frsqrte(double x) __attribute__((always_inline)); +static __inline__ double +__frsqrte(double x) +{ + double r; + __asm__("frsqrte %0,%1" : "=d" (r) : "d" (x)); + return r; +} + +static __inline__ float __fres(float x) __attribute__((always_inline)); +static __inline__ float +__fres(float x) +{ + float r; + __asm__("fres %0,%1" : "=f"(r) : "f"(x)); + return r; +} + +static __inline__ double __fsqrt(double x) __attribute__((always_inline)); +static __inline__ double +__fsqrt(double x) +{ + double r; + __asm__("fsqrt %0,%1" : "=d"(r) : "d"(x)); + return r; +} + +static __inline__ float __fsqrts(float x) __attribute__((always_inline)); +static __inline__ float +__fsqrts(float x) +{ + float r; + __asm__("fsqrts %0,%1" : "=f"(r) : "f"(x)); + return r; +} + +static __inline__ double __fmul (double a, double b) __attribute__ ((always_inline)); +static __inline__ double +__fmul(double a, double b) +{ + double d; + __asm__ ("fmul %0,%1,%2" : "=d" (d) : "d" (a), "d" (b)); + return d; +} + +static __inline__ float __fmuls (float a, float b) __attribute__ ((always_inline)); +static __inline__ float +__fmuls (float a, float b) +{ + float d; + __asm__ ("fmuls %0,%1,%2" : "=d" (d) : "f" (a), "f" (b)); + return d; +} + +static __inline__ float __frsp (float a) __attribute__ ((always_inline)); +static __inline__ float +__frsp (float a) +{ + float d; + __asm__ ("frsp %0,%1" : "=d" (d) : "f" (a)); + return d; +} + +static __inline__ double __fcfid (long long a) __attribute__((always_inline)); +static __inline__ double +__fcfid (long long a) +{ + double d; + 
__asm__ ("fcfid %0,%1" : "=d" (d) : "d" (a)); + return d; +} + +static __inline__ long long __fctid (double a) __attribute__ ((always_inline)); +static __inline__ long long +__fctid (double a) +{ + long long d; + __asm__ ("fctid %0,%1" : "=d" (d) : "d" (a)); + return d; +} + +static __inline__ long long __fctidz (double a) __attribute__ ((always_inline)); +static __inline__ long long +__fctidz (double a) +{ + long long d; + __asm__ ("fctidz %0,%1" : "=d" (d) : "d" (a)); + return d; +} + +static __inline__ int __fctiw (double a) __attribute__ ((always_inline)); +static __inline__ int +__fctiw (double a) +{ + unsigned long long d; + __asm__ ("fctiw %0,%1" : "=d" (d) : "d" (a)); + return (int) d; +} + +static __inline__ int __fctiwz (double a) __attribute__ ((always_inline)); +static __inline__ int +__fctiwz (double a) +{ + long long d; + __asm__ ("fctiwz %0,%1" : "=d" (d) : "d" (a)); + return (int) d; +} + +#ifdef __powerpc64__ +#define __rldcl(a,b,mb) __extension__ \ + ({ \ + unsigned long long d; \ + __asm__ ("rldcl %0,%1,%2,%3" : "=r" (d) : "r" (a), "r" (b), "i" (mb)); \ + d; \ + }) + +#define __rldcr(a,b,me) __extension__ \ + ({ \ + unsigned long long d; \ + __asm__ ("rldcr %0,%1,%2,%3" : "=r" (d) : "r" (a), "r" (b), "i" (me)); \ + d; \ + }) + +#define __rldic(a,sh,mb) __extension__ \ + ({ \ + unsigned long long d; \ + __asm__ ("rldic %0,%1,%2,%3" : "=r" (d) : "r" (a), "i" (sh), "i" (mb)); \ + d; \ + }) + +#define __rldicl(a,sh,mb) __extension__ \ + ({ \ + unsigned long long d; \ + __asm__ ("rldicl %0,%1,%2,%3" : "=r" (d) : "r" (a), "i" (sh), "i" (mb)); \ + d; \ + }) + +#define __rldicr(a,sh,me) __extension__ \ + ({ \ + unsigned long long d; \ + __asm__ ("rldicr %0,%1,%2,%3" : "=r" (d) : "r" (a), "i" (sh), "i" (me)); \ + d; \ + }) + +#define __rldimi(a,b,sh,mb) __extension__ \ + ({ \ + unsigned long long d; \ + __asm__ ("rldimi %0,%1,%2,%3" : "=r" (d) : "r" (b), "i" (sh), "i" (mb), "0" (a)); \ + d; \ + }) +#endif /* __powerpc64__ */ + +#define 
__rlwimi(a,b,sh,mb,me) __extension__ \ + ({ \ + unsigned int d; \ + __asm__ ("rlwimi %0,%1,%2,%3,%4" : "=r" (d) : "r" (b), "i" (sh), "i" (mb), "i" (me), "0" (a)); \ + d; \ + }) + +#define __rlwinm(a,sh,mb,me) __extension__ \ + ({ \ + unsigned int d; \ + __asm__ ("rlwinm %0,%1,%2,%3,%4" : "=r" (d) : "r" (a), "i" (sh), "i" (mb), "i" (me)); \ + d; \ + }) + +#define __rlwnm(a,b,mb,me) __extension__ \ + ({ \ + unsigned int d; \ + __asm__ ("rlwnm %0,%1,%2,%3,%4" : "=r" (d) : "r" (a), "r" (b), "i" (mb), "i" (me)); \ + d; \ + }) + +#ifdef __cplusplus +} +#endif + +#endif /* _PPU_INTRINSICS_H */ diff --git a/gcc/config/rs6000/predicates.md b/gcc/config/rs6000/predicates.md new file mode 100644 index 000000000..90947452b --- /dev/null +++ b/gcc/config/rs6000/predicates.md @@ -0,0 +1,1423 @@ +;; Predicate definitions for POWER and PowerPC. +;; Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 +;; Free Software Foundation, Inc. +;; +;; This file is part of GCC. +;; +;; GCC is free software; you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation; either version 3, or (at your option) +;; any later version. +;; +;; GCC is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . + +;; Return 1 for anything except PARALLEL. +(define_predicate "any_operand" + (match_code "const_int,const_double,const,symbol_ref,label_ref,subreg,reg,mem")) + +;; Return 1 for any PARALLEL. +(define_predicate "any_parallel_operand" + (match_code "parallel")) + +;; Return 1 if op is COUNT register. 
+(define_predicate "count_register_operand" + (and (match_code "reg") + (match_test "REGNO (op) == CTR_REGNO + || REGNO (op) > LAST_VIRTUAL_REGISTER"))) + +;; Return 1 if op is an Altivec register. +(define_predicate "altivec_register_operand" + (and (match_operand 0 "register_operand") + (match_test "GET_CODE (op) != REG + || ALTIVEC_REGNO_P (REGNO (op)) + || REGNO (op) > LAST_VIRTUAL_REGISTER"))) + +;; Return 1 if op is a VSX register. +(define_predicate "vsx_register_operand" + (and (match_operand 0 "register_operand") + (match_test "GET_CODE (op) != REG + || VSX_REGNO_P (REGNO (op)) + || REGNO (op) > LAST_VIRTUAL_REGISTER"))) + +;; Return 1 if op is a vector register that operates on floating point vectors +;; (either altivec or VSX). +(define_predicate "vfloat_operand" + (and (match_operand 0 "register_operand") + (match_test "GET_CODE (op) != REG + || VFLOAT_REGNO_P (REGNO (op)) + || REGNO (op) > LAST_VIRTUAL_REGISTER"))) + +;; Return 1 if op is a vector register that operates on integer vectors +;; (only altivec, VSX doesn't support integer vectors) +(define_predicate "vint_operand" + (and (match_operand 0 "register_operand") + (match_test "GET_CODE (op) != REG + || VINT_REGNO_P (REGNO (op)) + || REGNO (op) > LAST_VIRTUAL_REGISTER"))) + +;; Return 1 if op is a vector register to do logical operations on (and, or, +;; xor, etc.) +(define_predicate "vlogical_operand" + (and (match_operand 0 "register_operand") + (match_test "GET_CODE (op) != REG + || VLOGICAL_REGNO_P (REGNO (op)) + || REGNO (op) > LAST_VIRTUAL_REGISTER"))) + +;; Return 1 if op is the carry register. +(define_predicate "ca_operand" + (and (match_code "reg") + (match_test "CA_REGNO_P (REGNO (op))"))) + +;; Return 1 if op is a signed 5-bit constant integer. +(define_predicate "s5bit_cint_operand" + (and (match_code "const_int") + (match_test "INTVAL (op) >= -16 && INTVAL (op) <= 15"))) + +;; Return 1 if op is a unsigned 5-bit constant integer. 
+(define_predicate "u5bit_cint_operand" + (and (match_code "const_int") + (match_test "INTVAL (op) >= 0 && INTVAL (op) <= 31"))) + +;; Return 1 if op is a signed 8-bit constant integer. +;; Integer multiplication complete more quickly +(define_predicate "s8bit_cint_operand" + (and (match_code "const_int") + (match_test "INTVAL (op) >= -128 && INTVAL (op) <= 127"))) + +;; Return 1 if op is a constant integer that can fit in a D field. +(define_predicate "short_cint_operand" + (and (match_code "const_int") + (match_test "satisfies_constraint_I (op)"))) + +;; Return 1 if op is a constant integer that can fit in an unsigned D field. +(define_predicate "u_short_cint_operand" + (and (match_code "const_int") + (match_test "satisfies_constraint_K (op)"))) + +;; Return 1 if op is a constant integer that cannot fit in a signed D field. +(define_predicate "non_short_cint_operand" + (and (match_code "const_int") + (match_test "(unsigned HOST_WIDE_INT) + (INTVAL (op) + 0x8000) >= 0x10000"))) + +;; Return 1 if op is a positive constant integer that is an exact power of 2. +(define_predicate "exact_log2_cint_operand" + (and (match_code "const_int") + (match_test "INTVAL (op) > 0 && exact_log2 (INTVAL (op)) >= 0"))) + +;; Return 1 if op is a register that is not special. +(define_predicate "gpc_reg_operand" + (and (match_operand 0 "register_operand") + (match_test "(GET_CODE (op) != REG + || (REGNO (op) >= ARG_POINTER_REGNUM + && !CA_REGNO_P (REGNO (op))) + || REGNO (op) < MQ_REGNO) + && !((TARGET_E500_DOUBLE || TARGET_SPE) + && invalid_e500_subreg (op, mode))"))) + +;; Return 1 if op is a register that is a condition register field. +(define_predicate "cc_reg_operand" + (and (match_operand 0 "register_operand") + (match_test "GET_CODE (op) != REG + || REGNO (op) > LAST_VIRTUAL_REGISTER + || CR_REGNO_P (REGNO (op))"))) + +;; Return 1 if op is a register that is a condition register field not cr0. 
+(define_predicate "cc_reg_not_cr0_operand" + (and (match_operand 0 "register_operand") + (match_test "GET_CODE (op) != REG + || REGNO (op) > LAST_VIRTUAL_REGISTER + || CR_REGNO_NOT_CR0_P (REGNO (op))"))) + +;; Return 1 if op is a register that is a condition register field and if generating microcode, not cr0. +(define_predicate "cc_reg_not_micro_cr0_operand" + (and (match_operand 0 "register_operand") + (match_test "GET_CODE (op) != REG + || REGNO (op) > LAST_VIRTUAL_REGISTER + || (rs6000_gen_cell_microcode && CR_REGNO_NOT_CR0_P (REGNO (op))) + || (!rs6000_gen_cell_microcode && CR_REGNO_P (REGNO (op)))"))) + +;; Return 1 if op is a constant integer valid for D field +;; or non-special register register. +(define_predicate "reg_or_short_operand" + (if_then_else (match_code "const_int") + (match_operand 0 "short_cint_operand") + (match_operand 0 "gpc_reg_operand"))) + +;; Return 1 if op is a constant integer valid whose negation is valid for +;; D field or non-special register register. +;; Do not allow a constant zero because all patterns that call this +;; predicate use "addic r1,r2,-const" to set carry when r2 is greater than +;; or equal to const, which does not work for zero. +(define_predicate "reg_or_neg_short_operand" + (if_then_else (match_code "const_int") + (match_test "satisfies_constraint_P (op) + && INTVAL (op) != 0") + (match_operand 0 "gpc_reg_operand"))) + +;; Return 1 if op is a constant integer valid for DS field +;; or non-special register. +(define_predicate "reg_or_aligned_short_operand" + (if_then_else (match_code "const_int") + (and (match_operand 0 "short_cint_operand") + (match_test "!(INTVAL (op) & 3)")) + (match_operand 0 "gpc_reg_operand"))) + +;; Return 1 if op is a constant integer whose high-order 16 bits are zero +;; or non-special register. 
+(define_predicate "reg_or_u_short_operand" + (if_then_else (match_code "const_int") + (match_operand 0 "u_short_cint_operand") + (match_operand 0 "gpc_reg_operand"))) + +;; Return 1 if op is any constant integer +;; or non-special register. +(define_predicate "reg_or_cint_operand" + (ior (match_code "const_int") + (match_operand 0 "gpc_reg_operand"))) + +;; Return 1 if op is a constant integer valid for addition +;; or non-special register. +(define_predicate "reg_or_add_cint_operand" + (if_then_else (match_code "const_int") + (match_test "(HOST_BITS_PER_WIDE_INT == 32 + && (mode == SImode || INTVAL (op) < 0x7fff8000)) + || ((unsigned HOST_WIDE_INT) (INTVAL (op) + 0x80008000) + < (unsigned HOST_WIDE_INT) 0x100000000ll)") + (match_operand 0 "gpc_reg_operand"))) + +;; Return 1 if op is a constant integer valid for subtraction +;; or non-special register. +(define_predicate "reg_or_sub_cint_operand" + (if_then_else (match_code "const_int") + (match_test "(HOST_BITS_PER_WIDE_INT == 32 + && (mode == SImode || - INTVAL (op) < 0x7fff8000)) + || ((unsigned HOST_WIDE_INT) (- INTVAL (op) + + (mode == SImode + ? 0x80000000 : 0x80008000)) + < (unsigned HOST_WIDE_INT) 0x100000000ll)") + (match_operand 0 "gpc_reg_operand"))) + +;; Return 1 if op is any 32-bit unsigned constant integer +;; or non-special register. +(define_predicate "reg_or_logical_cint_operand" + (if_then_else (match_code "const_int") + (match_test "(GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT + && INTVAL (op) >= 0) + || ((INTVAL (op) & GET_MODE_MASK (mode) + & (~ (unsigned HOST_WIDE_INT) 0xffffffff)) == 0)") + (if_then_else (match_code "const_double") + (match_test "GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT + && mode == DImode + && CONST_DOUBLE_HIGH (op) == 0") + (match_operand 0 "gpc_reg_operand")))) + +;; Return 1 if operand is a CONST_DOUBLE that can be set in a register +;; with no more than one instruction per word. 
+(define_predicate "easy_fp_constant" + (match_code "const_double") +{ + long k[4]; + REAL_VALUE_TYPE rv; + + if (GET_MODE (op) != mode + || (!SCALAR_FLOAT_MODE_P (mode) && mode != DImode)) + return 0; + + /* Consider all constants with -msoft-float to be easy. */ + if ((TARGET_SOFT_FLOAT || TARGET_E500_SINGLE + || (TARGET_HARD_FLOAT && (TARGET_SINGLE_FLOAT && ! TARGET_DOUBLE_FLOAT))) + && mode != DImode) + return 1; + + if (DECIMAL_FLOAT_MODE_P (mode)) + return 0; + + /* If we are using V.4 style PIC, consider all constants to be hard. */ + if (flag_pic && DEFAULT_ABI == ABI_V4) + return 0; + +#ifdef TARGET_RELOCATABLE + /* Similarly if we are using -mrelocatable, consider all constants + to be hard. */ + if (TARGET_RELOCATABLE) + return 0; +#endif + + switch (mode) + { + case TFmode: + if (TARGET_E500_DOUBLE) + return 0; + + REAL_VALUE_FROM_CONST_DOUBLE (rv, op); + REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k); + + return (num_insns_constant_wide ((HOST_WIDE_INT) k[0]) == 1 + && num_insns_constant_wide ((HOST_WIDE_INT) k[1]) == 1 + && num_insns_constant_wide ((HOST_WIDE_INT) k[2]) == 1 + && num_insns_constant_wide ((HOST_WIDE_INT) k[3]) == 1); + + case DFmode: + /* The constant 0.f is easy under VSX. */ + if (op == CONST0_RTX (DFmode) && VECTOR_UNIT_VSX_P (DFmode)) + return 1; + + /* Force constants to memory before reload to utilize + compress_float_constant. + Avoid this when flag_unsafe_math_optimizations is enabled + because RDIV division to reciprocal optimization is not able + to regenerate the division. */ + if (TARGET_E500_DOUBLE + || (!reload_in_progress && !reload_completed + && !flag_unsafe_math_optimizations)) + return 0; + + REAL_VALUE_FROM_CONST_DOUBLE (rv, op); + REAL_VALUE_TO_TARGET_DOUBLE (rv, k); + + return (num_insns_constant_wide ((HOST_WIDE_INT) k[0]) == 1 + && num_insns_constant_wide ((HOST_WIDE_INT) k[1]) == 1); + + case SFmode: + /* The constant 0.f is easy. 
*/ + if (op == CONST0_RTX (SFmode)) + return 1; + + /* Force constants to memory before reload to utilize + compress_float_constant. + Avoid this when flag_unsafe_math_optimizations is enabled + because RDIV division to reciprocal optimization is not able + to regenerate the division. */ + if (!reload_in_progress && !reload_completed + && !flag_unsafe_math_optimizations) + return 0; + + REAL_VALUE_FROM_CONST_DOUBLE (rv, op); + REAL_VALUE_TO_TARGET_SINGLE (rv, k[0]); + + return num_insns_constant_wide (k[0]) == 1; + + case DImode: + return ((TARGET_POWERPC64 + && GET_CODE (op) == CONST_DOUBLE && CONST_DOUBLE_LOW (op) == 0) + || (num_insns_constant (op, DImode) <= 2)); + + case SImode: + return 1; + + default: + gcc_unreachable (); + } +}) + +;; Return 1 if the operand is a CONST_VECTOR and can be loaded into a +;; vector register without using memory. +(define_predicate "easy_vector_constant" + (match_code "const_vector") +{ + /* As the paired vectors are actually FPRs it seems that there is + no easy way to load a CONST_VECTOR without using memory. */ + if (TARGET_PAIRED_FLOAT) + return false; + + if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)) + { + if (zero_constant (op, mode)) + return true; + + return easy_altivec_constant (op, mode); + } + + if (SPE_VECTOR_MODE (mode)) + { + int cst, cst2; + if (zero_constant (op, mode)) + return true; + if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT) + return false; + + /* Limit SPE vectors to 15 bits signed. These we can generate with: + li r0, CONSTANT1 + evmergelo r0, r0, r0 + li r0, CONSTANT2 + + I don't know how efficient it would be to allow bigger constants, + considering we'll have an extra 'ori' for every 'li'. I doubt 5 + instructions is better than a 64-bit memory load, but I don't + have the e500 timing specs. 
*/ + if (mode == V2SImode) + { + cst = INTVAL (CONST_VECTOR_ELT (op, 0)); + cst2 = INTVAL (CONST_VECTOR_ELT (op, 1)); + return cst >= -0x7fff && cst <= 0x7fff + && cst2 >= -0x7fff && cst2 <= 0x7fff; + } + } + + return false; +}) + +;; Same as easy_vector_constant but only for EASY_VECTOR_15_ADD_SELF. +(define_predicate "easy_vector_constant_add_self" + (and (match_code "const_vector") + (and (match_test "TARGET_ALTIVEC") + (match_test "easy_altivec_constant (op, mode)"))) +{ + HOST_WIDE_INT val; + if (mode == V2DImode || mode == V2DFmode) + return 0; + val = const_vector_elt_as_int (op, GET_MODE_NUNITS (mode) - 1); + val = ((val & 0xff) ^ 0x80) - 0x80; + return EASY_VECTOR_15_ADD_SELF (val); +}) + +;; Same as easy_vector_constant but only for EASY_VECTOR_MSB. +(define_predicate "easy_vector_constant_msb" + (and (match_code "const_vector") + (and (match_test "TARGET_ALTIVEC") + (match_test "easy_altivec_constant (op, mode)"))) +{ + HOST_WIDE_INT val; + if (mode == V2DImode || mode == V2DFmode) + return 0; + val = const_vector_elt_as_int (op, GET_MODE_NUNITS (mode) - 1); + return EASY_VECTOR_MSB (val, GET_MODE_INNER (mode)); +}) + +;; Return 1 if operand is constant zero (scalars and vectors). +(define_predicate "zero_constant" + (and (match_code "const_int,const_double,const_vector") + (match_test "op == CONST0_RTX (mode)"))) + +;; Return 1 if operand is 0.0. +;; or non-special register register field no cr0 +(define_predicate "zero_fp_constant" + (and (match_code "const_double") + (match_test "SCALAR_FLOAT_MODE_P (mode) + && op == CONST0_RTX (mode)"))) + +;; Return 1 if the operand is in volatile memory. Note that during the +;; RTL generation phase, memory_operand does not return TRUE for volatile +;; memory references. So this function allows us to recognize volatile +;; references where it's safe. 
+(define_predicate "volatile_mem_operand" + (and (and (match_code "mem") + (match_test "MEM_VOLATILE_P (op)")) + (if_then_else (match_test "reload_completed") + (match_operand 0 "memory_operand") + (if_then_else (match_test "reload_in_progress") + (match_test "strict_memory_address_p (mode, XEXP (op, 0))") + (match_test "memory_address_p (mode, XEXP (op, 0))"))))) + +;; Return 1 if the operand is an offsettable memory operand. +(define_predicate "offsettable_mem_operand" + (and (match_operand 0 "memory_operand") + (match_test "offsettable_nonstrict_memref_p (op)"))) + +;; Return 1 if the operand is a memory operand with an address divisible by 4 +(define_predicate "word_offset_memref_operand" + (match_operand 0 "memory_operand") +{ + /* Address inside MEM. */ + op = XEXP (op, 0); + + /* Extract address from auto-inc/dec. */ + if (GET_CODE (op) == PRE_INC + || GET_CODE (op) == PRE_DEC) + op = XEXP (op, 0); + else if (GET_CODE (op) == PRE_MODIFY) + op = XEXP (op, 1); + + return (GET_CODE (op) != PLUS + || ! REG_P (XEXP (op, 0)) + || GET_CODE (XEXP (op, 1)) != CONST_INT + || INTVAL (XEXP (op, 1)) % 4 == 0); +}) + +;; Return 1 if the operand is an indexed or indirect memory operand. 
+(define_predicate "indexed_or_indirect_operand" + (match_code "mem") +{ + op = XEXP (op, 0); + if (VECTOR_MEM_ALTIVEC_P (mode) + && GET_CODE (op) == AND + && GET_CODE (XEXP (op, 1)) == CONST_INT + && INTVAL (XEXP (op, 1)) == -16) + op = XEXP (op, 0); + + return indexed_or_indirect_address (op, mode); +}) + +;; Return 1 if the operand is an indexed or indirect memory operand with an +;; AND -16 in it, used to recognize when we need to switch to Altivec loads +;; to realign loops instead of VSX (altivec silently ignores the bottom bits, +;; while VSX uses the full address and traps) +(define_predicate "altivec_indexed_or_indirect_operand" + (match_code "mem") +{ + op = XEXP (op, 0); + if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) + && GET_CODE (op) == AND + && GET_CODE (XEXP (op, 1)) == CONST_INT + && INTVAL (XEXP (op, 1)) == -16) + return indexed_or_indirect_address (XEXP (op, 0), mode); + + return 0; +}) + +;; Return 1 if the operand is an indexed or indirect address. +(define_special_predicate "indexed_or_indirect_address" + (and (match_test "REG_P (op) + || (GET_CODE (op) == PLUS + /* Omit testing REG_P (XEXP (op, 0)). */ + && REG_P (XEXP (op, 1)))") + (match_operand 0 "address_operand"))) + +;; Used for the destination of the fix_truncdfsi2 expander. +;; If stfiwx will be used, the result goes to memory; otherwise, +;; we're going to emit a store and a load of a subreg, so the dest is a +;; register. +(define_predicate "fix_trunc_dest_operand" + (if_then_else (match_test "! TARGET_E500_DOUBLE && TARGET_PPC_GFXOPT") + (match_operand 0 "memory_operand") + (match_operand 0 "gpc_reg_operand"))) + +;; Return 1 if the operand is either a non-special register or can be used +;; as the operand of a `mode' add insn. +(define_predicate "add_operand" + (if_then_else (match_code "const_int") + (match_test "satisfies_constraint_I (op) + || satisfies_constraint_L (op)") + (match_operand 0 "gpc_reg_operand"))) + +;; Return 1 if OP is a constant but not a valid add_operand. 
+(define_predicate "non_add_cint_operand" + (and (match_code "const_int") + (match_test "!satisfies_constraint_I (op) + && !satisfies_constraint_L (op)"))) + +;; Return 1 if the operand is a constant that can be used as the operand +;; of an OR or XOR. +(define_predicate "logical_const_operand" + (match_code "const_int,const_double") +{ + HOST_WIDE_INT opl, oph; + + if (GET_CODE (op) == CONST_INT) + { + opl = INTVAL (op) & GET_MODE_MASK (mode); + + if (HOST_BITS_PER_WIDE_INT <= 32 + && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT && opl < 0) + return 0; + } + else if (GET_CODE (op) == CONST_DOUBLE) + { + gcc_assert (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT); + + opl = CONST_DOUBLE_LOW (op); + oph = CONST_DOUBLE_HIGH (op); + if (oph != 0) + return 0; + } + else + return 0; + + return ((opl & ~ (unsigned HOST_WIDE_INT) 0xffff) == 0 + || (opl & ~ (unsigned HOST_WIDE_INT) 0xffff0000) == 0); +}) + +;; Return 1 if the operand is a non-special register or a constant that +;; can be used as the operand of an OR or XOR. +(define_predicate "logical_operand" + (ior (match_operand 0 "gpc_reg_operand") + (match_operand 0 "logical_const_operand"))) + +;; Return 1 if op is a constant that is not a logical operand, but could +;; be split into one. +(define_predicate "non_logical_cint_operand" + (and (match_code "const_int,const_double") + (and (not (match_operand 0 "logical_operand")) + (match_operand 0 "reg_or_logical_cint_operand")))) + +;; Return 1 if op is a constant that can be encoded in a 32-bit mask, +;; suitable for use with rlwinm (no more than two 1->0 or 0->1 +;; transitions). Reject all ones and all zeros, since these should have +;; been optimized away and confuse the making of MB and ME. +(define_predicate "mask_operand" + (match_code "const_int") +{ + HOST_WIDE_INT c, lsb; + + c = INTVAL (op); + + if (TARGET_POWERPC64) + { + /* Fail if the mask is not 32-bit. 
*/ + if (mode == DImode && (c & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0) + return 0; + + /* Fail if the mask wraps around because the upper 32-bits of the + mask will all be 1s, contrary to GCC's internal view. */ + if ((c & 0x80000001) == 0x80000001) + return 0; + } + + /* We don't change the number of transitions by inverting, + so make sure we start with the LS bit zero. */ + if (c & 1) + c = ~c; + + /* Reject all zeros or all ones. */ + if (c == 0) + return 0; + + /* Find the first transition. */ + lsb = c & -c; + + /* Invert to look for a second transition. */ + c = ~c; + + /* Erase first transition. */ + c &= -lsb; + + /* Find the second transition (if any). */ + lsb = c & -c; + + /* Match if all the bits above are 1's (or c is zero). */ + return c == -lsb; +}) + +;; Return 1 for the PowerPC64 rlwinm corner case. +(define_predicate "mask_operand_wrap" + (match_code "const_int") +{ + HOST_WIDE_INT c, lsb; + + c = INTVAL (op); + + if ((c & 0x80000001) != 0x80000001) + return 0; + + c = ~c; + if (c == 0) + return 0; + + lsb = c & -c; + c = ~c; + c &= -lsb; + lsb = c & -c; + return c == -lsb; +}) + +;; Return 1 if the operand is a constant that is a PowerPC64 mask +;; suitable for use with rldicl or rldicr (no more than one 1->0 or 0->1 +;; transition). Reject all zeros, since zero should have been +;; optimized away and confuses the making of MB and ME. +(define_predicate "mask64_operand" + (match_code "const_int") +{ + HOST_WIDE_INT c, lsb; + + c = INTVAL (op); + + /* Reject all zeros. */ + if (c == 0) + return 0; + + /* We don't change the number of transitions by inverting, + so make sure we start with the LS bit zero. */ + if (c & 1) + c = ~c; + + /* Find the first transition. */ + lsb = c & -c; + + /* Match if all the bits above are 1's (or c is zero). */ + return c == -lsb; +}) + +;; Like mask64_operand, but allow up to three transitions. This +;; predicate is used by insn patterns that generate two rldicl or +;; rldicr machine insns. 
+(define_predicate "mask64_2_operand"
+  (match_code "const_int")
+{
+  HOST_WIDE_INT c, lsb;
+
+  c = INTVAL (op);
+
+  /* Disallow all zeros.  */
+  if (c == 0)
+    return 0;
+
+  /* We don't change the number of transitions by inverting,
+     so make sure we start with the LS bit zero.  */
+  if (c & 1)
+    c = ~c;
+
+  /* Find the first transition.  (c & -c isolates the lowest set bit.)  */
+  lsb = c & -c;
+
+  /* Invert to look for a second transition.  */
+  c = ~c;
+
+  /* Erase first transition.  */
+  c &= -lsb;
+
+  /* Find the second transition.  */
+  lsb = c & -c;
+
+  /* Invert to look for a third transition.  */
+  c = ~c;
+
+  /* Erase second transition.  */
+  c &= -lsb;
+
+  /* Find the third transition (if any).  */
+  lsb = c & -c;
+
+  /* Match if all the bits above are 1's (or c is zero).  */
+  return c == -lsb;
+})
+
+;; Like and_operand, but also match constants that can be implemented
+;; with two rldicl or rldicr insns.
+(define_predicate "and64_2_operand"
+  (ior (match_operand 0 "mask64_2_operand")
+       ;; If CR0 is fixed, the record-form (and.) split is unavailable,
+       ;; so only plain registers are allowed in the fallback arm.
+       (if_then_else (match_test "fixed_regs[CR0_REGNO]")
+	 (match_operand 0 "gpc_reg_operand")
+	 (match_operand 0 "logical_operand"))))
+
+;; Return 1 if the operand is either a non-special register or a
+;; constant that can be used as the operand of a logical AND.
+(define_predicate "and_operand"
+  (ior (match_operand 0 "mask_operand")
+       (ior (and (match_test "TARGET_POWERPC64 && mode == DImode")
+		 (match_operand 0 "mask64_operand"))
+	    ;; Same CR0 consideration as and64_2_operand above.
+	    (if_then_else (match_test "fixed_regs[CR0_REGNO]")
+	      (match_operand 0 "gpc_reg_operand")
+	      (match_operand 0 "logical_operand")))))
+
+;; Return 1 if the operand is either a logical operand or a short cint operand.
+(define_predicate "scc_eq_operand"
+  (ior (match_operand 0 "logical_operand")
+       (match_operand 0 "short_cint_operand")))
+
+;; Return 1 if the operand is a general non-special register or memory operand.
+(define_predicate "reg_or_mem_operand"
+  (ior (match_operand 0 "memory_operand")
+       (ior (and (match_code "mem")
+		 (match_test "macho_lo_sum_memory_operand (op, mode)"))
+	    (ior (match_operand 0 "volatile_mem_operand")
+		 (match_operand 0 "gpc_reg_operand")))))
+
+;; Return 1 if the operand is either an easy FP constant or memory or reg.
+;; On E500_DOUBLE targets, MEM operands are rejected entirely.
+(define_predicate "reg_or_none500mem_operand"
+  (if_then_else (match_code "mem")
+     (and (match_test "!TARGET_E500_DOUBLE")
+	  (ior (match_operand 0 "memory_operand")
+	       (ior (match_test "macho_lo_sum_memory_operand (op, mode)")
+		    (match_operand 0 "volatile_mem_operand"))))
+     (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if the operand is CONST_DOUBLE 0, register or memory operand.
+(define_predicate "zero_reg_mem_operand"
+  (ior (match_operand 0 "zero_fp_constant")
+       (match_operand 0 "reg_or_mem_operand")))
+
+;; Return 1 if the operand is a general register or memory operand without
+;; pre_inc or pre_dec or pre_modify, which produces invalid form of PowerPC
+;; lwa instruction.
+(define_predicate "lwa_operand"
+  (match_code "reg,subreg,mem")
+{
+  rtx inner, addr, offset;
+
+  inner = op;
+  /* After reload a SUBREG of a hard register is just the register.  */
+  if (reload_completed && GET_CODE (inner) == SUBREG)
+    inner = SUBREG_REG (inner);
+
+  if (gpc_reg_operand (inner, mode))
+    return true;
+  if (!memory_operand (inner, mode))
+    return false;
+  addr = XEXP (inner, 0);
+  /* lwa has no update forms; PRE_MODIFY is only acceptable when it can
+     be done as an indexed (X-form) access instead.  */
+  if (GET_CODE (addr) == PRE_INC
+      || GET_CODE (addr) == PRE_DEC
+      || (GET_CODE (addr) == PRE_MODIFY
+	  && !legitimate_indexed_address_p (XEXP (addr, 1), 0)))
+    return false;
+  /* Look through a (lo_sum reg (const ...)) to the PLUS inside it.  */
+  if (GET_CODE (addr) == LO_SUM
+      && GET_CODE (XEXP (addr, 0)) == REG
+      && GET_CODE (XEXP (addr, 1)) == CONST)
+    addr = XEXP (XEXP (addr, 1), 0);
+  if (GET_CODE (addr) != PLUS)
+    return true;
+  offset = XEXP (addr, 1);
+  if (GET_CODE (offset) != CONST_INT)
+    return true;
+  /* lwa is a DS-form instruction: its displacement must be a multiple
+     of 4.  */
+  return INTVAL (offset) % 4 == 0;
+})
+
+;; Return 1 if the operand, used inside a MEM, is a SYMBOL_REF.
+(define_predicate "symbol_ref_operand" + (and (match_code "symbol_ref") + (match_test "(mode == VOIDmode || GET_MODE (op) == mode) + && (DEFAULT_ABI != ABI_AIX || SYMBOL_REF_FUNCTION_P (op))"))) + +;; Return 1 if op is an operand that can be loaded via the GOT. +;; or non-special register register field no cr0 +(define_predicate "got_operand" + (match_code "symbol_ref,const,label_ref")) + +;; Return 1 if op is a simple reference that can be loaded via the GOT, +;; excluding labels involving addition. +(define_predicate "got_no_const_operand" + (match_code "symbol_ref,label_ref")) + +;; Return 1 if op is a SYMBOL_REF for a TLS symbol. +(define_predicate "rs6000_tls_symbol_ref" + (and (match_code "symbol_ref") + (match_test "RS6000_SYMBOL_REF_TLS_P (op)"))) + +;; Return 1 if the operand, used inside a MEM, is a valid first argument +;; to CALL. This is a SYMBOL_REF, a pseudo-register, LR or CTR. +(define_predicate "call_operand" + (if_then_else (match_code "reg") + (match_test "REGNO (op) == LR_REGNO + || REGNO (op) == CTR_REGNO + || REGNO (op) >= FIRST_PSEUDO_REGISTER") + (match_code "symbol_ref"))) + +;; Return 1 if the operand is a SYMBOL_REF for a function known to be in +;; this file. +(define_predicate "current_file_function_operand" + (and (match_code "symbol_ref") + (match_test "(DEFAULT_ABI != ABI_AIX || SYMBOL_REF_FUNCTION_P (op)) + && ((SYMBOL_REF_LOCAL_P (op) + && (DEFAULT_ABI != ABI_AIX + || !SYMBOL_REF_EXTERNAL_P (op))) + || (op == XEXP (DECL_RTL (current_function_decl), + 0)))"))) + +;; Return 1 if this operand is a valid input for a move insn. +(define_predicate "input_operand" + (match_code "label_ref,symbol_ref,const,high,reg,subreg,mem, + const_double,const_vector,const_int,plus") +{ + /* Memory is always valid. */ + if (memory_operand (op, mode)) + return 1; + + /* For floating-point, easy constants are valid. */ + if (SCALAR_FLOAT_MODE_P (mode) + && CONSTANT_P (op) + && easy_fp_constant (op, mode)) + return 1; + + /* Allow any integer constant. 
*/ + if (GET_MODE_CLASS (mode) == MODE_INT + && (GET_CODE (op) == CONST_INT + || GET_CODE (op) == CONST_DOUBLE)) + return 1; + + /* Allow easy vector constants. */ + if (GET_CODE (op) == CONST_VECTOR + && easy_vector_constant (op, mode)) + return 1; + + /* Do not allow invalid E500 subregs. */ + if ((TARGET_E500_DOUBLE || TARGET_SPE) + && GET_CODE (op) == SUBREG + && invalid_e500_subreg (op, mode)) + return 0; + + /* For floating-point or multi-word mode, the only remaining valid type + is a register. */ + if (SCALAR_FLOAT_MODE_P (mode) + || GET_MODE_SIZE (mode) > UNITS_PER_WORD) + return register_operand (op, mode); + + /* The only cases left are integral modes one word or smaller (we + do not get called for MODE_CC values). These can be in any + register. */ + if (register_operand (op, mode)) + return 1; + + /* A SYMBOL_REF referring to the TOC is valid. */ + if (legitimate_constant_pool_address_p (op, mode, false)) + return 1; + + /* A constant pool expression (relative to the TOC) is valid */ + if (toc_relative_expr_p (op)) + return 1; + + /* V.4 allows SYMBOL_REFs and CONSTs that are in the small data region + to be valid. */ + if (DEFAULT_ABI == ABI_V4 + && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST) + && small_data_operand (op, Pmode)) + return 1; + + return 0; +}) + +;; Return true if OP is an invalid SUBREG operation on the e500. +(define_predicate "rs6000_nonimmediate_operand" + (match_code "reg,subreg,mem") +{ + if ((TARGET_E500_DOUBLE || TARGET_SPE) + && GET_CODE (op) == SUBREG + && invalid_e500_subreg (op, mode)) + return 0; + + return nonimmediate_operand (op, mode); +}) + +;; Return true if operand is boolean operator. +(define_predicate "boolean_operator" + (match_code "and,ior,xor")) + +;; Return true if operand is OR-form of boolean operator. +(define_predicate "boolean_or_operator" + (match_code "ior,xor")) + +;; Return true if operand is an equality operator. 
+(define_special_predicate "equality_operator" + (match_code "eq,ne")) + +;; Return true if operand is MIN or MAX operator. +(define_predicate "min_max_operator" + (match_code "smin,smax,umin,umax")) + +;; Return 1 if OP is a comparison operation that is valid for a branch +;; instruction. We check the opcode against the mode of the CC value. +;; validate_condition_mode is an assertion. +(define_predicate "branch_comparison_operator" + (and (match_operand 0 "comparison_operator") + (and (match_test "GET_MODE_CLASS (GET_MODE (XEXP (op, 0))) == MODE_CC") + (match_test "validate_condition_mode (GET_CODE (op), + GET_MODE (XEXP (op, 0))), + 1")))) + +(define_predicate "rs6000_cbranch_operator" + (if_then_else (match_test "TARGET_HARD_FLOAT && !TARGET_FPRS") + (match_operand 0 "ordered_comparison_operator") + (match_operand 0 "comparison_operator"))) + +;; Return 1 if OP is a comparison operation that is valid for an SCC insn -- +;; it must be a positive comparison. +(define_predicate "scc_comparison_operator" + (and (match_operand 0 "branch_comparison_operator") + (match_code "eq,lt,gt,ltu,gtu,unordered"))) + +;; Return 1 if OP is a comparison operation whose inverse would be valid for +;; an SCC insn. +(define_predicate "scc_rev_comparison_operator" + (and (match_operand 0 "branch_comparison_operator") + (match_code "ne,le,ge,leu,geu,ordered"))) + +;; Return 1 if OP is a comparison operation that is valid for a branch +;; insn, which is true if the corresponding bit in the CC register is set. +(define_predicate "branch_positive_comparison_operator" + (and (match_operand 0 "branch_comparison_operator") + (match_code "eq,lt,gt,ltu,gtu,unordered"))) + +;; Return 1 if OP is a load multiple operation, known to be a PARALLEL. +(define_predicate "load_multiple_operation" + (match_code "parallel") +{ + int count = XVECLEN (op, 0); + unsigned int dest_regno; + rtx src_addr; + int i; + + /* Perform a quick check so we don't blow up below. 
*/ + if (count <= 1 + || GET_CODE (XVECEXP (op, 0, 0)) != SET + || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG + || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != MEM) + return 0; + + dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0))); + src_addr = XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0); + + for (i = 1; i < count; i++) + { + rtx elt = XVECEXP (op, 0, i); + + if (GET_CODE (elt) != SET + || GET_CODE (SET_DEST (elt)) != REG + || GET_MODE (SET_DEST (elt)) != SImode + || REGNO (SET_DEST (elt)) != dest_regno + i + || GET_CODE (SET_SRC (elt)) != MEM + || GET_MODE (SET_SRC (elt)) != SImode + || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS + || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr) + || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT + || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != i * 4) + return 0; + } + + return 1; +}) + +;; Return 1 if OP is a store multiple operation, known to be a PARALLEL. +;; The second vector element is a CLOBBER. +(define_predicate "store_multiple_operation" + (match_code "parallel") +{ + int count = XVECLEN (op, 0) - 1; + unsigned int src_regno; + rtx dest_addr; + int i; + + /* Perform a quick check so we don't blow up below. */ + if (count <= 1 + || GET_CODE (XVECEXP (op, 0, 0)) != SET + || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != MEM + || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != REG) + return 0; + + src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0))); + dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, 0)), 0); + + for (i = 1; i < count; i++) + { + rtx elt = XVECEXP (op, 0, i + 1); + + if (GET_CODE (elt) != SET + || GET_CODE (SET_SRC (elt)) != REG + || GET_MODE (SET_SRC (elt)) != SImode + || REGNO (SET_SRC (elt)) != src_regno + i + || GET_CODE (SET_DEST (elt)) != MEM + || GET_MODE (SET_DEST (elt)) != SImode + || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS + || ! 
rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr) + || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT + || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != i * 4) + return 0; + } + + return 1; +}) + +;; Return 1 if OP is valid for a save_world call in prologue, known to be +;; a PARLLEL. +(define_predicate "save_world_operation" + (match_code "parallel") +{ + int index; + int i; + rtx elt; + int count = XVECLEN (op, 0); + + if (count != 54) + return 0; + + index = 0; + if (GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER + || GET_CODE (XVECEXP (op, 0, index++)) != USE) + return 0; + + for (i=1; i <= 18; i++) + { + elt = XVECEXP (op, 0, index++); + if (GET_CODE (elt) != SET + || GET_CODE (SET_DEST (elt)) != MEM + || ! memory_operand (SET_DEST (elt), DFmode) + || GET_CODE (SET_SRC (elt)) != REG + || GET_MODE (SET_SRC (elt)) != DFmode) + return 0; + } + + for (i=1; i <= 12; i++) + { + elt = XVECEXP (op, 0, index++); + if (GET_CODE (elt) != SET + || GET_CODE (SET_DEST (elt)) != MEM + || GET_CODE (SET_SRC (elt)) != REG + || GET_MODE (SET_SRC (elt)) != V4SImode) + return 0; + } + + for (i=1; i <= 19; i++) + { + elt = XVECEXP (op, 0, index++); + if (GET_CODE (elt) != SET + || GET_CODE (SET_DEST (elt)) != MEM + || ! memory_operand (SET_DEST (elt), Pmode) + || GET_CODE (SET_SRC (elt)) != REG + || GET_MODE (SET_SRC (elt)) != Pmode) + return 0; + } + + elt = XVECEXP (op, 0, index++); + if (GET_CODE (elt) != SET + || GET_CODE (SET_DEST (elt)) != MEM + || ! memory_operand (SET_DEST (elt), Pmode) + || GET_CODE (SET_SRC (elt)) != REG + || REGNO (SET_SRC (elt)) != CR2_REGNO + || GET_MODE (SET_SRC (elt)) != Pmode) + return 0; + + if (GET_CODE (XVECEXP (op, 0, index++)) != SET + || GET_CODE (XVECEXP (op, 0, index++)) != SET) + return 0; + return 1; +}) + +;; Return 1 if OP is valid for a restore_world call in epilogue, known to be +;; a PARLLEL. 
+;; Expected PARALLEL layout (59 elements):
+;;   1 RETURN, 2 USEs, 1 CLOBBER,
+;;   1 SET restoring CR2 from memory,
+;;   19 Pmode GPR loads, 12 V4SImode vector loads, 18 DFmode FPR loads,
+;;   4 CLOBBERs and a final USE.
+(define_predicate "restore_world_operation"
+  (match_code "parallel")
+{
+  int index;
+  int i;
+  rtx elt;
+  int count = XVECLEN (op, 0);
+
+  if (count != 59)
+    return 0;
+
+  index = 0;
+  if (GET_CODE (XVECEXP (op, 0, index++)) != RETURN
+      || GET_CODE (XVECEXP (op, 0, index++)) != USE
+      || GET_CODE (XVECEXP (op, 0, index++)) != USE
+      || GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER)
+    return 0;
+
+  /* CR2 must be restored from a valid memory location.  */
+  elt = XVECEXP (op, 0, index++);
+  if (GET_CODE (elt) != SET
+      || GET_CODE (SET_SRC (elt)) != MEM
+      || ! memory_operand (SET_SRC (elt), Pmode)
+      || GET_CODE (SET_DEST (elt)) != REG
+      || REGNO (SET_DEST (elt)) != CR2_REGNO
+      || GET_MODE (SET_DEST (elt)) != Pmode)
+    return 0;
+
+  /* 19 word-mode register loads.  */
+  for (i=1; i <= 19; i++)
+    {
+      elt = XVECEXP (op, 0, index++);
+      if (GET_CODE (elt) != SET
+	  || GET_CODE (SET_SRC (elt)) != MEM
+	  || ! memory_operand (SET_SRC (elt), Pmode)
+	  || GET_CODE (SET_DEST (elt)) != REG
+	  || GET_MODE (SET_DEST (elt)) != Pmode)
+	return 0;
+    }
+
+  /* 12 vector register loads (address form not checked here, matching
+     the vector block of save_world_operation).  */
+  for (i=1; i <= 12; i++)
+    {
+      elt = XVECEXP (op, 0, index++);
+      if (GET_CODE (elt) != SET
+	  || GET_CODE (SET_SRC (elt)) != MEM
+	  || GET_CODE (SET_DEST (elt)) != REG
+	  || GET_MODE (SET_DEST (elt)) != V4SImode)
+	return 0;
+    }
+
+  /* 18 floating-point register loads.  */
+  for (i=1; i <= 18; i++)
+    {
+      elt = XVECEXP (op, 0, index++);
+      if (GET_CODE (elt) != SET
+	  || GET_CODE (SET_SRC (elt)) != MEM
+	  || ! memory_operand (SET_SRC (elt), DFmode)
+	  || GET_CODE (SET_DEST (elt)) != REG
+	  || GET_MODE (SET_DEST (elt)) != DFmode)
+	return 0;
+    }
+
+  if (GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER
+      || GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER
+      || GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER
+      || GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER
+      || GET_CODE (XVECEXP (op, 0, index++)) != USE)
+    return 0;
+  return 1;
+})
+
+;; Return 1 if OP is valid for a vrsave call, known to be a PARALLEL.
+(define_predicate "vrsave_operation" + (match_code "parallel") +{ + int count = XVECLEN (op, 0); + unsigned int dest_regno, src_regno; + int i; + + if (count <= 1 + || GET_CODE (XVECEXP (op, 0, 0)) != SET + || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG + || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC_VOLATILE + || XINT (SET_SRC (XVECEXP (op, 0, 0)), 1) != UNSPECV_SET_VRSAVE) + return 0; + + dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0))); + src_regno = REGNO (XVECEXP (SET_SRC (XVECEXP (op, 0, 0)), 0, 1)); + + if (dest_regno != VRSAVE_REGNO || src_regno != VRSAVE_REGNO) + return 0; + + for (i = 1; i < count; i++) + { + rtx elt = XVECEXP (op, 0, i); + + if (GET_CODE (elt) != CLOBBER + && GET_CODE (elt) != SET) + return 0; + } + + return 1; +}) + +;; Return 1 if OP is valid for mfcr insn, known to be a PARALLEL. +(define_predicate "mfcr_operation" + (match_code "parallel") +{ + int count = XVECLEN (op, 0); + int i; + + /* Perform a quick check so we don't blow up below. */ + if (count < 1 + || GET_CODE (XVECEXP (op, 0, 0)) != SET + || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC + || XVECLEN (SET_SRC (XVECEXP (op, 0, 0)), 0) != 2) + return 0; + + for (i = 0; i < count; i++) + { + rtx exp = XVECEXP (op, 0, i); + rtx unspec; + int maskval; + rtx src_reg; + + src_reg = XVECEXP (SET_SRC (exp), 0, 0); + + if (GET_CODE (src_reg) != REG + || GET_MODE (src_reg) != CCmode + || ! CR_REGNO_P (REGNO (src_reg))) + return 0; + + if (GET_CODE (exp) != SET + || GET_CODE (SET_DEST (exp)) != REG + || GET_MODE (SET_DEST (exp)) != SImode + || ! 
INT_REGNO_P (REGNO (SET_DEST (exp)))) + return 0; + unspec = SET_SRC (exp); + maskval = 1 << (MAX_CR_REGNO - REGNO (src_reg)); + + if (GET_CODE (unspec) != UNSPEC + || XINT (unspec, 1) != UNSPEC_MOVESI_FROM_CR + || XVECLEN (unspec, 0) != 2 + || XVECEXP (unspec, 0, 0) != src_reg + || GET_CODE (XVECEXP (unspec, 0, 1)) != CONST_INT + || INTVAL (XVECEXP (unspec, 0, 1)) != maskval) + return 0; + } + return 1; +}) + +;; Return 1 if OP is valid for mtcrf insn, known to be a PARALLEL. +(define_predicate "mtcrf_operation" + (match_code "parallel") +{ + int count = XVECLEN (op, 0); + int i; + rtx src_reg; + + /* Perform a quick check so we don't blow up below. */ + if (count < 1 + || GET_CODE (XVECEXP (op, 0, 0)) != SET + || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC + || XVECLEN (SET_SRC (XVECEXP (op, 0, 0)), 0) != 2) + return 0; + src_reg = XVECEXP (SET_SRC (XVECEXP (op, 0, 0)), 0, 0); + + if (GET_CODE (src_reg) != REG + || GET_MODE (src_reg) != SImode + || ! INT_REGNO_P (REGNO (src_reg))) + return 0; + + for (i = 0; i < count; i++) + { + rtx exp = XVECEXP (op, 0, i); + rtx unspec; + int maskval; + + if (GET_CODE (exp) != SET + || GET_CODE (SET_DEST (exp)) != REG + || GET_MODE (SET_DEST (exp)) != CCmode + || ! CR_REGNO_P (REGNO (SET_DEST (exp)))) + return 0; + unspec = SET_SRC (exp); + maskval = 1 << (MAX_CR_REGNO - REGNO (SET_DEST (exp))); + + if (GET_CODE (unspec) != UNSPEC + || XINT (unspec, 1) != UNSPEC_MOVESI_TO_CR + || XVECLEN (unspec, 0) != 2 + || XVECEXP (unspec, 0, 0) != src_reg + || GET_CODE (XVECEXP (unspec, 0, 1)) != CONST_INT + || INTVAL (XVECEXP (unspec, 0, 1)) != maskval) + return 0; + } + return 1; +}) + +;; Return 1 if OP is valid for lmw insn, known to be a PARALLEL. +(define_predicate "lmw_operation" + (match_code "parallel") +{ + int count = XVECLEN (op, 0); + unsigned int dest_regno; + rtx src_addr; + unsigned int base_regno; + HOST_WIDE_INT offset; + int i; + + /* Perform a quick check so we don't blow up below. 
*/ + if (count <= 1 + || GET_CODE (XVECEXP (op, 0, 0)) != SET + || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG + || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != MEM) + return 0; + + dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0))); + src_addr = XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0); + + if (dest_regno > 31 + || count != 32 - (int) dest_regno) + return 0; + + if (legitimate_indirect_address_p (src_addr, 0)) + { + offset = 0; + base_regno = REGNO (src_addr); + if (base_regno == 0) + return 0; + } + else if (rs6000_legitimate_offset_address_p (SImode, src_addr, 0)) + { + offset = INTVAL (XEXP (src_addr, 1)); + base_regno = REGNO (XEXP (src_addr, 0)); + } + else + return 0; + + for (i = 0; i < count; i++) + { + rtx elt = XVECEXP (op, 0, i); + rtx newaddr; + rtx addr_reg; + HOST_WIDE_INT newoffset; + + if (GET_CODE (elt) != SET + || GET_CODE (SET_DEST (elt)) != REG + || GET_MODE (SET_DEST (elt)) != SImode + || REGNO (SET_DEST (elt)) != dest_regno + i + || GET_CODE (SET_SRC (elt)) != MEM + || GET_MODE (SET_SRC (elt)) != SImode) + return 0; + newaddr = XEXP (SET_SRC (elt), 0); + if (legitimate_indirect_address_p (newaddr, 0)) + { + newoffset = 0; + addr_reg = newaddr; + } + else if (rs6000_legitimate_offset_address_p (SImode, newaddr, 0)) + { + addr_reg = XEXP (newaddr, 0); + newoffset = INTVAL (XEXP (newaddr, 1)); + } + else + return 0; + if (REGNO (addr_reg) != base_regno + || newoffset != offset + 4 * i) + return 0; + } + + return 1; +}) + +;; Return 1 if OP is valid for stmw insn, known to be a PARALLEL. +(define_predicate "stmw_operation" + (match_code "parallel") +{ + int count = XVECLEN (op, 0); + unsigned int src_regno; + rtx dest_addr; + unsigned int base_regno; + HOST_WIDE_INT offset; + int i; + + /* Perform a quick check so we don't blow up below. 
*/ + if (count <= 1 + || GET_CODE (XVECEXP (op, 0, 0)) != SET + || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != MEM + || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != REG) + return 0; + + src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0))); + dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, 0)), 0); + + if (src_regno > 31 + || count != 32 - (int) src_regno) + return 0; + + if (legitimate_indirect_address_p (dest_addr, 0)) + { + offset = 0; + base_regno = REGNO (dest_addr); + if (base_regno == 0) + return 0; + } + else if (rs6000_legitimate_offset_address_p (SImode, dest_addr, 0)) + { + offset = INTVAL (XEXP (dest_addr, 1)); + base_regno = REGNO (XEXP (dest_addr, 0)); + } + else + return 0; + + for (i = 0; i < count; i++) + { + rtx elt = XVECEXP (op, 0, i); + rtx newaddr; + rtx addr_reg; + HOST_WIDE_INT newoffset; + + if (GET_CODE (elt) != SET + || GET_CODE (SET_SRC (elt)) != REG + || GET_MODE (SET_SRC (elt)) != SImode + || REGNO (SET_SRC (elt)) != src_regno + i + || GET_CODE (SET_DEST (elt)) != MEM + || GET_MODE (SET_DEST (elt)) != SImode) + return 0; + newaddr = XEXP (SET_DEST (elt), 0); + if (legitimate_indirect_address_p (newaddr, 0)) + { + newoffset = 0; + addr_reg = newaddr; + } + else if (rs6000_legitimate_offset_address_p (SImode, newaddr, 0)) + { + addr_reg = XEXP (newaddr, 0); + newoffset = INTVAL (XEXP (newaddr, 1)); + } + else + return 0; + if (REGNO (addr_reg) != base_regno + || newoffset != offset + 4 * i) + return 0; + } + + return 1; +}) diff --git a/gcc/config/rs6000/rios1.md b/gcc/config/rs6000/rios1.md new file mode 100644 index 000000000..9ad9ce3e1 --- /dev/null +++ b/gcc/config/rs6000/rios1.md @@ -0,0 +1,191 @@ +;; Scheduling description for IBM POWER processor. +;; Copyright (C) 2003, 2004, 2007, 2009 Free Software Foundation, Inc. +;; +;; This file is part of GCC. 
+ +;; GCC is free software; you can redistribute it and/or modify it +;; under the terms of the GNU General Public License as published +;; by the Free Software Foundation; either version 3, or (at your +;; option) any later version. + +;; GCC is distributed in the hope that it will be useful, but WITHOUT +;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +;; License for more details. + +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . + +(define_automaton "rios1,rios1fp") +(define_cpu_unit "iu_rios1" "rios1") +(define_cpu_unit "fpu_rios1" "rios1fp") +(define_cpu_unit "bpu_rios1" "rios1") + +;; RIOS1 32-bit IU, FPU, BPU + +(define_insn_reservation "rios1-load" 2 + (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\ + load_l,store_c,sync") + (eq_attr "cpu" "rios1,ppc601")) + "iu_rios1") + +(define_insn_reservation "rios1-store" 2 + (and (eq_attr "type" "store,store_ux,store_u") + (eq_attr "cpu" "rios1,ppc601")) + "iu_rios1") + +(define_insn_reservation "rios1-fpload" 2 + (and (eq_attr "type" "fpload,fpload_ux,fpload_u") + (eq_attr "cpu" "rios1")) + "iu_rios1") + +(define_insn_reservation "ppc601-fpload" 3 + (and (eq_attr "type" "fpload,fpload_ux,fpload_u") + (eq_attr "cpu" "ppc601")) + "iu_rios1") + +(define_insn_reservation "rios1-fpstore" 3 + (and (eq_attr "type" "fpstore,fpstore_ux,fpstore_u") + (eq_attr "cpu" "rios1,ppc601")) + "iu_rios1+fpu_rios1") + +(define_insn_reservation "rios1-integer" 1 + (and (eq_attr "type" "integer,insert_word,insert_dword,shift,\ + trap,var_shift_rotate,cntlz,exts,isel") + (eq_attr "cpu" "rios1,ppc601")) + "iu_rios1") + +(define_insn_reservation "rios1-two" 1 + (and (eq_attr "type" "two") + (eq_attr "cpu" "rios1,ppc601")) + "iu_rios1,iu_rios1") + +(define_insn_reservation "rios1-three" 1 + (and (eq_attr "type" "three") + (eq_attr "cpu" 
"rios1,ppc601")) + "iu_rios1,iu_rios1,iu_rios1") + +(define_insn_reservation "rios1-imul" 5 + (and (eq_attr "type" "imul,imul_compare") + (eq_attr "cpu" "rios1")) + "iu_rios1*5") + +(define_insn_reservation "rios1-imul2" 4 + (and (eq_attr "type" "imul2") + (eq_attr "cpu" "rios1")) + "iu_rios1*4") + +(define_insn_reservation "rios1-imul3" 3 + (and (eq_attr "type" "imul") + (eq_attr "cpu" "rios1")) + "iu_rios1*3") + +(define_insn_reservation "ppc601-imul" 5 + (and (eq_attr "type" "imul,imul2,imul3,imul_compare") + (eq_attr "cpu" "ppc601")) + "iu_rios1*5") + +(define_insn_reservation "rios1-idiv" 19 + (and (eq_attr "type" "idiv") + (eq_attr "cpu" "rios1")) + "iu_rios1*19") + +(define_insn_reservation "ppc601-idiv" 36 + (and (eq_attr "type" "idiv") + (eq_attr "cpu" "ppc601")) + "iu_rios1*36") + +; compare executes on integer unit, but feeds insns which +; execute on the branch unit. +(define_insn_reservation "rios1-compare" 4 + (and (eq_attr "type" "cmp,fast_compare,compare") + (eq_attr "cpu" "rios1")) + "iu_rios1,nothing*2,bpu_rios1") + +(define_insn_reservation "rios1-delayed_compare" 5 + (and (eq_attr "type" "delayed_compare,var_delayed_compare") + (eq_attr "cpu" "rios1")) + "iu_rios1,nothing*3,bpu_rios1") + +(define_insn_reservation "ppc601-compare" 3 + (and (eq_attr "type" "cmp,compare,delayed_compare,\ + var_delayed_compare") + (eq_attr "cpu" "ppc601")) + "iu_rios1,nothing,bpu_rios1") + +(define_insn_reservation "rios1-fpcompare" 9 + (and (eq_attr "type" "fpcompare") + (eq_attr "cpu" "rios1")) + "fpu_rios1,nothing*3,bpu_rios1") + +(define_insn_reservation "ppc601-fpcompare" 5 + (and (eq_attr "type" "fpcompare") + (eq_attr "cpu" "ppc601")) + "(fpu_rios1+iu_rios1*2),nothing*2,bpu_rios1") + +(define_insn_reservation "rios1-fp" 2 + (and (eq_attr "type" "fp,dmul") + (eq_attr "cpu" "rios1")) + "fpu_rios1") + +(define_insn_reservation "ppc601-fp" 4 + (and (eq_attr "type" "fp") + (eq_attr "cpu" "ppc601")) + "fpu_rios1") + +(define_insn_reservation "rios1-dmul" 5 + (and 
(eq_attr "type" "dmul") + (eq_attr "cpu" "ppc601")) + "fpu_rios1*2") + +(define_insn_reservation "rios1-sdiv" 19 + (and (eq_attr "type" "sdiv,ddiv") + (eq_attr "cpu" "rios1")) + "fpu_rios1*19") + +(define_insn_reservation "ppc601-sdiv" 17 + (and (eq_attr "type" "sdiv") + (eq_attr "cpu" "ppc601")) + "fpu_rios1*17") + +(define_insn_reservation "ppc601-ddiv" 31 + (and (eq_attr "type" "ddiv") + (eq_attr "cpu" "ppc601")) + "fpu_rios1*31") + +(define_insn_reservation "rios1-mfcr" 2 + (and (eq_attr "type" "mfcr") + (eq_attr "cpu" "rios1,ppc601")) + "iu_rios1,bpu_rios1") + +(define_insn_reservation "rios1-mtcr" 4 + (and (eq_attr "type" "mtcr") + (eq_attr "cpu" "rios1,ppc601")) + "iu_rios1,bpu_rios1") + +(define_insn_reservation "rios1-crlogical" 4 + (and (eq_attr "type" "cr_logical,delayed_cr") + (eq_attr "cpu" "rios1,ppc601")) + "bpu_rios1") + +(define_insn_reservation "rios1-mtjmpr" 5 + (and (eq_attr "type" "mtjmpr") + (eq_attr "cpu" "rios1")) + "iu_rios1,bpu_rios1") + +(define_insn_reservation "ppc601-mtjmpr" 4 + (and (eq_attr "type" "mtjmpr") + (eq_attr "cpu" "ppc601")) + "iu_rios1,bpu_rios1") + +(define_insn_reservation "rios1-mfjmpr" 2 + (and (eq_attr "type" "mfjmpr") + (eq_attr "cpu" "rios1,ppc601")) + "iu_rios1,bpu_rios1") + +(define_insn_reservation "rios1-branch" 1 + (and (eq_attr "type" "jmpreg,branch,isync") + (eq_attr "cpu" "rios1,ppc601")) + "bpu_rios1") + diff --git a/gcc/config/rs6000/rios2.md b/gcc/config/rs6000/rios2.md new file mode 100644 index 000000000..96633af2f --- /dev/null +++ b/gcc/config/rs6000/rios2.md @@ -0,0 +1,129 @@ +;; Scheduling description for IBM Power2 processor. +;; Copyright (C) 2003, 2004, 2007, 2009 Free Software Foundation, Inc. +;; +;; This file is part of GCC. + +;; GCC is free software; you can redistribute it and/or modify it +;; under the terms of the GNU General Public License as published +;; by the Free Software Foundation; either version 3, or (at your +;; option) any later version. 
+ +;; GCC is distributed in the hope that it will be useful, but WITHOUT +;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +;; License for more details. + +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . + +(define_automaton "rios2,rios2fp") +(define_cpu_unit "iu1_rios2,iu2_rios2" "rios2") +(define_cpu_unit "fpu1_rios2,fpu2_rios2" "rios2fp") +(define_cpu_unit "bpu_rios2" "rios2") + +;; RIOS2 32-bit 2xIU, 2xFPU, BPU +;; IU1 can perform all integer operations +;; IU2 can perform all integer operations except imul and idiv + +(define_insn_reservation "rios2-load" 2 + (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,\ + load_ux,load_u,fpload,fpload_ux,fpload_u,\ + load_l,store_c,sync") + (eq_attr "cpu" "rios2")) + "iu1_rios2|iu2_rios2") + +(define_insn_reservation "rios2-store" 2 + (and (eq_attr "type" "store,store_ux,store_u,fpstore,fpstore_ux,fpstore_u") + (eq_attr "cpu" "rios2")) + "iu1_rios2|iu2_rios2") + +(define_insn_reservation "rios2-integer" 1 + (and (eq_attr "type" "integer,insert_word,insert_dword,shift,trap,\ + var_shift_rotate,cntlz,exts,isel") + (eq_attr "cpu" "rios2")) + "iu1_rios2|iu2_rios2") + +(define_insn_reservation "rios2-two" 1 + (and (eq_attr "type" "two") + (eq_attr "cpu" "rios2")) + "iu1_rios2|iu2_rios2,iu1_rios2|iu2_rios2") + +(define_insn_reservation "rios2-three" 1 + (and (eq_attr "type" "three") + (eq_attr "cpu" "rios2")) + "iu1_rios2|iu2_rios2,iu1_rios2|iu2_rios2,iu1_rios2|iu2_rios2") + +(define_insn_reservation "rios2-imul" 2 + (and (eq_attr "type" "imul,imul2,imul3,imul_compare") + (eq_attr "cpu" "rios2")) + "iu1_rios2*2") + +(define_insn_reservation "rios2-idiv" 13 + (and (eq_attr "type" "idiv") + (eq_attr "cpu" "rios2")) + "iu1_rios2*13") + +; compare executes on integer unit, but feeds insns which +; execute on the branch unit. 
+(define_insn_reservation "rios2-compare" 3 + (and (eq_attr "type" "cmp,fast_compare,compare,delayed_compare,\ + var_delayed_compare") + (eq_attr "cpu" "rios2")) + "(iu1_rios2|iu2_rios2),nothing,bpu_rios2") + +(define_insn_reservation "rios2-fp" 2 + (and (eq_attr "type" "fp") + (eq_attr "cpu" "rios2")) + "fpu1_rios2|fpu2_rios2") + +(define_insn_reservation "rios2-fpcompare" 5 + (and (eq_attr "type" "fpcompare") + (eq_attr "cpu" "rios2")) + "(fpu1_rios2|fpu2_rios2),nothing*3,bpu_rios2") + +(define_insn_reservation "rios2-dmul" 2 + (and (eq_attr "type" "dmul") + (eq_attr "cpu" "rios2")) + "fpu1_rios2|fpu2_rios2") + +(define_insn_reservation "rios2-sdiv" 17 + (and (eq_attr "type" "sdiv,ddiv") + (eq_attr "cpu" "rios2")) + "(fpu1_rios2*17)|(fpu2_rios2*17)") + +(define_insn_reservation "rios2-ssqrt" 26 + (and (eq_attr "type" "ssqrt,dsqrt") + (eq_attr "cpu" "rios2")) + "(fpu1_rios2*26)|(fpu2_rios2*26)") + +(define_insn_reservation "rios2-mfcr" 2 + (and (eq_attr "type" "mfcr") + (eq_attr "cpu" "rios2")) + "iu1_rios2,bpu_rios2") + +(define_insn_reservation "rios2-mtcr" 3 + (and (eq_attr "type" "mtcr") + (eq_attr "cpu" "rios2")) + "iu1_rios2,bpu_rios2") + +(define_insn_reservation "rios2-crlogical" 3 + (and (eq_attr "type" "cr_logical,delayed_cr") + (eq_attr "cpu" "rios2")) + "bpu_rios2") + +(define_insn_reservation "rios2-mtjmpr" 5 + (and (eq_attr "type" "mtjmpr") + (eq_attr "cpu" "rios2")) + "iu1_rios2,bpu_rios2") + +(define_insn_reservation "rios2-mfjmpr" 2 + (and (eq_attr "type" "mfjmpr") + (eq_attr "cpu" "rios2")) + "iu1_rios2,bpu_rios2") + +(define_insn_reservation "rios2-branch" 1 + (and (eq_attr "type" "jmpreg,branch,isync") + (eq_attr "cpu" "rios2")) + "bpu_rios2") + diff --git a/gcc/config/rs6000/rs6000-builtin.def b/gcc/config/rs6000/rs6000-builtin.def new file mode 100644 index 000000000..7bd6b20fa --- /dev/null +++ b/gcc/config/rs6000/rs6000-builtin.def @@ -0,0 +1,1020 @@ +/* Builtin functions for rs6000/powerpc. 
+ Copyright (C) 2009, 2010, 2011 + Free Software Foundation, Inc. + Contributed by Michael Meissner (meissner@linux.vnet.ibm.com) + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* Before including this file, two macros must be defined: + RS6000_BUILTIN -- 2 arguments, the enum name, and classification + RS6000_BUILTIN_EQUATE -- 2 arguments, enum name and value */ + +/* AltiVec builtins. 
*/ +RS6000_BUILTIN(ALTIVEC_BUILTIN_ST_INTERNAL_4si, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_LD_INTERNAL_4si, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_ST_INTERNAL_8hi, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_LD_INTERNAL_8hi, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_ST_INTERNAL_16qi, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_LD_INTERNAL_16qi, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_ST_INTERNAL_4sf, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_LD_INTERNAL_4sf, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_ST_INTERNAL_2df, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_LD_INTERNAL_2df, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_ST_INTERNAL_2di, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_LD_INTERNAL_2di, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VADDUBM, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VADDUHM, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VADDUWM, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VADDFP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VADDCUW, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VADDUBS, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VADDSBS, RS6000_BTC_SAT) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VADDUHS, RS6000_BTC_SAT) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VADDSHS, RS6000_BTC_SAT) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VADDUWS, RS6000_BTC_SAT) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VADDSWS, RS6000_BTC_SAT) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VAND, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VANDC, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VAVGUB, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VAVGSB, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VAVGUH, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VAVGSH, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VAVGUW, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VAVGSW, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCFUX, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCFSX, RS6000_BTC_MISC) 
+RS6000_BUILTIN(ALTIVEC_BUILTIN_VCTSXS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCTUXS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPBFP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPEQUB, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPEQUH, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPEQUW, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPEQFP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPGEFP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPGTUB, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPGTSB, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPGTUH, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPGTSH, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPGTUW, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPGTSW, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPGTFP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEXPTEFP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VLOGEFP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMADDFP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMAXUB, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMAXSB, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMAXUH, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMAXSH, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMAXUW, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMAXSW, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMAXFP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMHADDSHS, RS6000_BTC_SAT) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMHRADDSHS, RS6000_BTC_SAT) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMLADDUHM, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMRGHB, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMRGHH, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMRGHW, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMRGLB, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMRGLH, RS6000_BTC_CONST) 
+RS6000_BUILTIN(ALTIVEC_BUILTIN_VMRGLW, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMSUMUBM, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMSUMMBM, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMSUMUHM, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMSUMSHM, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMSUMUHS, RS6000_BTC_SAT) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMSUMSHS, RS6000_BTC_SAT) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMINUB, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMINSB, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMINUH, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMINSH, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMINUW, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMINSW, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMINFP, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMULEUB, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMULEUB_UNS, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMULESB, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMULEUH, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMULEUH_UNS, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMULESH, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMULOUB, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMULOUB_UNS, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMULOSB, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMULOUH, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMULOUH_UNS, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VMULOSH, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VNMSUBFP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VNOR, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VOR, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSEL_2DF, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSEL_2DI, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSEL_4SI, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSEL_4SF, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSEL_8HI, 
RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSEL_16QI, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSEL_2DI_UNS, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSEL_4SI_UNS, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSEL_8HI_UNS, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSEL_16QI_UNS, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VPERM_2DF, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VPERM_2DI, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VPERM_4SI, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VPERM_4SF, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VPERM_8HI, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VPERM_16QI, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VPERM_2DI_UNS, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VPERM_4SI_UNS, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VPERM_8HI_UNS, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VPERM_16QI_UNS, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VPKUHUM, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VPKUWUM, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VPKPX, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VPKUHSS, RS6000_BTC_SAT) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VPKSHSS, RS6000_BTC_SAT) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VPKUWSS, RS6000_BTC_SAT) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VPKSWSS, RS6000_BTC_SAT) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VPKUHUS, RS6000_BTC_SAT) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VPKSHUS, RS6000_BTC_SAT) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VPKUWUS, RS6000_BTC_SAT) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VPKSWUS, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VREFP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VRFIM, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VRFIN, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VRFIP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VRFIZ, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VRLB, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VRLH, 
RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VRLW, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VRSQRTFP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VRSQRTEFP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSLB, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSLH, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSLW, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSL, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSLO, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSPLTB, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSPLTH, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSPLTW, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSPLTISB, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSPLTISH, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSPLTISW, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSRB, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSRH, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSRW, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSRAB, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSRAH, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSRAW, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSR, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSRO, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSUBUBM, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSUBUHM, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSUBUWM, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSUBFP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSUBCUW, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSUBUBS, RS6000_BTC_SAT) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSUBSBS, RS6000_BTC_SAT) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSUBUHS, RS6000_BTC_SAT) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSUBSHS, RS6000_BTC_SAT) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSUBUWS, RS6000_BTC_SAT) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSUBSWS, RS6000_BTC_SAT) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSUM4UBS, RS6000_BTC_SAT) 
+RS6000_BUILTIN(ALTIVEC_BUILTIN_VSUM4SBS, RS6000_BTC_SAT) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSUM4SHS, RS6000_BTC_SAT) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSUM2SWS, RS6000_BTC_SAT) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSUMSWS, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VXOR, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSLDOI_16QI, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSLDOI_8HI, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSLDOI_4SI, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VSLDOI_4SF, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VUPKHSB, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VUPKHPX, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VUPKHSH, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VUPKLSB, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VUPKLPX, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VUPKLSH, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_MTVSCR, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_MFVSCR, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_DSSALL, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_DSS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_LVSL, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_LVSR, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_DSTT, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_DSTST, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_DSTSTT, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_DST, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_LVEBX, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_LVEHX, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_LVEWX, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_LVXL, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_LVX, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_STVX, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_LVLX, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_LVLXL, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_LVRX, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_LVRXL, RS6000_BTC_MEM) 
+RS6000_BUILTIN(ALTIVEC_BUILTIN_STVEBX, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_STVEHX, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_STVEWX, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_STVXL, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_STVLX, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_STVLXL, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_STVRX, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_STVRXL, RS6000_BTC_MEM) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPBFP_P, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPEQFP_P, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPEQUB_P, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPEQUH_P, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPEQUW_P, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPGEFP_P, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPGTFP_P, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPGTSB_P, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPGTSH_P, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPGTSW_P, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPGTUB_P, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPGTUH_P, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPGTUW_P, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_ABSS_V4SI, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_ABSS_V8HI, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_ABSS_V16QI, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_ABS_V4SI, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_ABS_V4SF, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_ABS_V8HI, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_ABS_V16QI, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_MASK_FOR_LOAD, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_MASK_FOR_STORE, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_INIT_V4SI, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_INIT_V8HI, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_INIT_V16QI, 
RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_INIT_V4SF, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SET_V4SI, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SET_V8HI, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SET_V16QI, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SET_V4SF, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_EXT_V4SI, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_EXT_V8HI, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_EXT_V16QI, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_EXT_V4SF, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_COPYSIGN_V4SF, RS6000_BTC_CONST) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VRECIPFP, RS6000_BTC_FP_PURE) + +/* Altivec overloaded builtins. */ +/* For now, don't set the classification for overloaded functions. + The function should be converted to the type specific instruction + before we get to the point about classifying the builtin type. */ +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPEQ_P, RS6000_BTC_MISC) +RS6000_BUILTIN_EQUATE(ALTIVEC_BUILTIN_OVERLOADED_FIRST, + ALTIVEC_BUILTIN_VCMPEQ_P) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPGT_P, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPGE_P, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_ABS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_ABSS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_ADD, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_ADDC, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_ADDS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_AND, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_ANDC, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_AVG, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_EXTRACT, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_CEIL, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_CMPB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_CMPEQ, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_CMPEQUB, 
RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_CMPEQUH, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_CMPEQUW, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_CMPGE, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_CMPGT, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_CMPLE, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_CMPLT, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_COPYSIGN, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_CTF, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_CTS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_CTU, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_DST, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_DSTST, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_DSTSTT, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_DSTT, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_EXPTE, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_FLOOR, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LD, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LDE, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LDL, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LOGE, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVEBX, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVEHX, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVEWX, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVLX, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVLXL, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVRX, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVRXL, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVSL, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVSR, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_MADD, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_MADDS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_MAX, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_MERGEH, RS6000_BTC_MISC) 
+RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_MERGEL, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_MIN, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_MLADD, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_MPERM, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_MRADDS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_MRGHB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_MRGHH, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_MRGHW, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_MRGLB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_MRGLH, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_MRGLW, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_MSUM, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_MSUMS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_MTVSCR, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_MULE, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_MULO, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_NEARBYINT, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_NMSUB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_NOR, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_OR, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_PACK, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_PACKPX, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_PACKS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_PACKSU, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_PERM, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_RE, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_RECIP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_RL, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_RINT, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_ROUND, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_RSQRT, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_RSQRTE, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SEL, RS6000_BTC_MISC) 
+RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SL, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SLD, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SLL, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SLO, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SPLAT, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SPLAT_S16, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SPLAT_S32, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SPLAT_S8, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SPLAT_U16, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SPLAT_U32, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SPLAT_U8, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SPLTB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SPLTH, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SPLTW, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SQRT, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SR, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SRA, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SRL, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SRO, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_ST, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_STE, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_STL, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_STVEBX, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_STVEHX, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_STVEWX, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_STVLX, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_STVLXL, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_STVRX, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_STVRXL, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SUB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SUBC, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SUBS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SUM2S, RS6000_BTC_MISC) 
+RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SUM4S, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SUMS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_TRUNC, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_UNPACKH, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_UNPACKL, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VADDFP, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VADDSBS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VADDSHS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VADDSWS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VADDUBM, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VADDUBS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VADDUHM, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VADDUHS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VADDUWM, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VADDUWS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VAVGSB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VAVGSH, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VAVGSW, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VAVGUB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VAVGUH, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VAVGUW, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VCFSX, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VCFUX, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VCMPEQFP, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VCMPEQUB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VCMPEQUH, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VCMPEQUW, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VCMPGTFP, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VCMPGTSB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VCMPGTSH, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VCMPGTSW, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VCMPGTUB, RS6000_BTC_MISC) 
+RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VCMPGTUH, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VCMPGTUW, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMAXFP, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMAXSB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMAXSH, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMAXSW, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMAXUB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMAXUH, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMAXUW, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMINFP, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMINSB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMINSH, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMINSW, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMINUB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMINUH, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMINUW, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMRGHB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMRGHH, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMRGHW, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMRGLB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMRGLH, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMRGLW, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMSUMMBM, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMSUMSHM, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMSUMSHS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMSUMUBM, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMSUMUHM, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMSUMUHS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMULESB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMULESH, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMULEUB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMULEUH, RS6000_BTC_MISC) 
+RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMULOSB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMULOSH, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMULOUB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VMULOUH, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VPKSHSS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VPKSHUS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VPKSWSS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VPKSWUS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VPKUHUM, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VPKUHUS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VPKUWUM, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VPKUWUS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VRLB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VRLH, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VRLW, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VSLB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VSLH, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VSLW, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VSPLTB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VSPLTH, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VSPLTW, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VSRAB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VSRAH, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VSRAW, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VSRB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VSRH, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VSRW, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VSUBFP, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VSUBSBS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VSUBSHS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VSUBSWS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VSUBUBM, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VSUBUBS, 
RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VSUBUHM, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VSUBUHS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VSUBUWM, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VSUBUWS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VSUM4SBS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VSUM4SHS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VSUM4UBS, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VUPKHPX, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VUPKHSB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VUPKHSH, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VUPKLPX, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VUPKLSB, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_VUPKLSH, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_XOR, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_STEP, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_PROMOTE, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_INSERT, RS6000_BTC_MISC) +RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SPLATS, RS6000_BTC_MISC) +RS6000_BUILTIN_EQUATE(ALTIVEC_BUILTIN_OVERLOADED_LAST, + ALTIVEC_BUILTIN_VEC_SPLATS) + +/* SPE builtins. 
*/ +RS6000_BUILTIN(SPE_BUILTIN_EVADDW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVAND, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVANDC, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVDIVWS, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVDIVWU, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVEQV, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVFSADD, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVFSDIV, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVFSMUL, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVFSSUB, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVLDDX, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVLDHX, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVLDWX, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVLHHESPLATX, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVLHHOSSPLATX, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVLHHOUSPLATX, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVLWHEX, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVLWHOSX, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVLWHOUX, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVLWHSPLATX, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVLWWSPLATX, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMERGEHI, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMERGEHILO, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMERGELO, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMERGELOHI, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHEGSMFAA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHEGSMFAN, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHEGSMIAA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHEGSMIAN, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHEGUMIAA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHEGUMIAN, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHESMF, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHESMFA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHESMFAAW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHESMFANW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHESMI, 
RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHESMIA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHESMIAAW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHESMIANW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHESSF, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHESSFA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHESSFAAW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHESSFANW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHESSIAAW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHESSIANW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHEUMI, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHEUMIA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHEUMIAAW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHEUMIANW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHEUSIAAW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHEUSIANW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHOGSMFAA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHOGSMFAN, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHOGSMIAA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHOGSMIAN, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHOGUMIAA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHOGUMIAN, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHOSMF, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHOSMFA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHOSMFAAW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHOSMFANW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHOSMI, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHOSMIA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHOSMIAAW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHOSMIANW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHOSSF, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHOSSFA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHOSSFAAW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHOSSFANW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHOSSIAAW, RS6000_BTC_MISC) 
+RS6000_BUILTIN(SPE_BUILTIN_EVMHOSSIANW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHOUMI, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHOUMIA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHOUMIAAW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHOUMIANW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHOUSIAAW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMHOUSIANW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHSMF, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHSMFA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHSMI, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHSMIA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHSSF, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHSSFA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHUMI, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHUMIA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWLSMIAAW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWLSMIANW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWLSSIAAW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWLSSIANW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWLUMI, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWLUMIA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWLUMIAAW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWLUMIANW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWLUSIAAW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWLUSIANW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWSMF, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWSMFA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWSMFAA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWSMFAN, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWSMI, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWSMIA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWSMIAA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWSMIAN, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHSSFAA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWSSF, RS6000_BTC_MISC) 
+RS6000_BUILTIN(SPE_BUILTIN_EVMWSSFA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWSSFAA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWSSFAN, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWUMI, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWUMIA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWUMIAA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWUMIAN, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVNAND, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVNOR, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVOR, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVORC, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVRLW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSLW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSRWS, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSRWU, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSTDDX, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSTDHX, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSTDWX, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSTWHEX, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSTWHOX, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSTWWEX, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSTWWOX, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSUBFW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVXOR, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVABS, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVADDSMIAAW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVADDSSIAAW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVADDUMIAAW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVADDUSIAAW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVCNTLSW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVCNTLZW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVEXTSB, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVEXTSH, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVFSABS, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVFSCFSF, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVFSCFSI, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVFSCFUF, 
RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVFSCFUI, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVFSCTSF, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVFSCTSI, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVFSCTSIZ, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVFSCTUF, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVFSCTUI, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVFSCTUIZ, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVFSNABS, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVFSNEG, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMRA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVNEG, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVRNDW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSUBFSMIAAW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSUBFSSIAAW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSUBFUMIAAW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSUBFUSIAAW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVADDIW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVLDD, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVLDH, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVLDW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVLHHESPLAT, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVLHHOSSPLAT, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVLHHOUSPLAT, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVLWHE, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVLWHOS, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVLWHOU, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVLWHSPLAT, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVLWWSPLAT, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVRLWI, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSLWI, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSRWIS, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSRWIU, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSTDD, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSTDH, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSTDW, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSTWHE, RS6000_BTC_MISC) 
+RS6000_BUILTIN(SPE_BUILTIN_EVSTWHO, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSTWWE, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSTWWO, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSUBIFW, RS6000_BTC_MISC) + + /* Compares. */ +RS6000_BUILTIN(SPE_BUILTIN_EVCMPEQ, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVCMPGTS, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVCMPGTU, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVCMPLTS, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVCMPLTU, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVFSCMPEQ, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVFSCMPGT, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVFSCMPLT, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVFSTSTEQ, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVFSTSTGT, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVFSTSTLT, RS6000_BTC_MISC) + +/* EVSEL compares. */ +RS6000_BUILTIN(SPE_BUILTIN_EVSEL_CMPEQ, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSEL_CMPGTS, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSEL_CMPGTU, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSEL_CMPLTS, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSEL_CMPLTU, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSEL_FSCMPEQ, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSEL_FSCMPGT, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSEL_FSCMPLT, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSEL_FSTSTEQ, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSEL_FSTSTGT, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSEL_FSTSTLT, RS6000_BTC_MISC) + +RS6000_BUILTIN(SPE_BUILTIN_EVSPLATFI, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVSPLATI, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHSSMAA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHSMFAA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHSMIAA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHUSIAA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHUMIAA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHSSFAN, RS6000_BTC_MISC) 
+RS6000_BUILTIN(SPE_BUILTIN_EVMWHSSIAN, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHSMFAN, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHSMIAN, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHUSIAN, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHUMIAN, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHGSSFAA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHGSMFAA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHGSMIAA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHGUMIAA, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHGSSFAN, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHGSMFAN, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHGSMIAN, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_EVMWHGUMIAN, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_MTSPEFSCR, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_MFSPEFSCR, RS6000_BTC_MISC) +RS6000_BUILTIN(SPE_BUILTIN_BRINC, RS6000_BTC_MISC) + +/* PAIRED builtins. */ +RS6000_BUILTIN(PAIRED_BUILTIN_DIVV2SF3, RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_ABSV2SF2, RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_NEGV2SF2, RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_SQRTV2SF2, RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_ADDV2SF3, RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_SUBV2SF3, RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_RESV2SF2, RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_MULV2SF3, RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_MSUB, RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_MADD, RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_NMSUB, RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_NMADD, RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_NABSV2SF2, RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_SUM0, RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_SUM1, RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_MULS0, RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_MULS1, RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_MERGE00, RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_MERGE01, 
RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_MERGE10, RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_MERGE11, RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_MADDS0, RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_MADDS1, RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_STX, RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_LX, RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_SELV2SF4, RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_CMPU0, RS6000_BTC_MISC) +RS6000_BUILTIN(PAIRED_BUILTIN_CMPU1, RS6000_BTC_MISC) + + /* VSX builtins. */ +RS6000_BUILTIN(VSX_BUILTIN_LXSDX, RS6000_BTC_MEM) +RS6000_BUILTIN(VSX_BUILTIN_LXVD2X_V2DF, RS6000_BTC_MEM) +RS6000_BUILTIN(VSX_BUILTIN_LXVD2X_V2DI, RS6000_BTC_MEM) +RS6000_BUILTIN(VSX_BUILTIN_LXVDSX, RS6000_BTC_MEM) +RS6000_BUILTIN(VSX_BUILTIN_LXVW4X_V4SF, RS6000_BTC_MEM) +RS6000_BUILTIN(VSX_BUILTIN_LXVW4X_V4SI, RS6000_BTC_MEM) +RS6000_BUILTIN(VSX_BUILTIN_LXVW4X_V8HI, RS6000_BTC_MEM) +RS6000_BUILTIN(VSX_BUILTIN_LXVW4X_V16QI, RS6000_BTC_MEM) +RS6000_BUILTIN(VSX_BUILTIN_STXSDX, RS6000_BTC_MEM) +RS6000_BUILTIN(VSX_BUILTIN_STXVD2X_V2DF, RS6000_BTC_MEM) +RS6000_BUILTIN(VSX_BUILTIN_STXVD2X_V2DI, RS6000_BTC_MEM) +RS6000_BUILTIN(VSX_BUILTIN_STXVW4X_V4SF, RS6000_BTC_MEM) +RS6000_BUILTIN(VSX_BUILTIN_STXVW4X_V4SI, RS6000_BTC_MEM) +RS6000_BUILTIN(VSX_BUILTIN_STXVW4X_V8HI, RS6000_BTC_MEM) +RS6000_BUILTIN(VSX_BUILTIN_STXVW4X_V16QI, RS6000_BTC_MEM) +RS6000_BUILTIN(VSX_BUILTIN_XSABSDP, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XSADDDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSCMPODP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSCMPUDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSCPSGNDP, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XSCVDPSP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSCVDPSXDS, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSCVDPSXWS, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSCVDPUXDS, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSCVDPUXWS, RS6000_BTC_FP_PURE) 
+RS6000_BUILTIN(VSX_BUILTIN_XSCVSPDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSCVSXDDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSCVUXDDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSDIVDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSMADDADP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSMADDMDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSMAXDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSMINDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSMOVDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSMSUBADP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSMSUBMDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSMULDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSNABSDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSNEGDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSNMADDADP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSNMADDMDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSNMSUBADP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSNMSUBMDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSRDPI, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSRDPIC, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSRDPIM, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSRDPIP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSRDPIZ, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSREDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSRSQRTEDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSSQRTDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSSUBDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_CPSGNDP, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_CPSGNSP, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XSTDIVDP_FE, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSTDIVDP_FG, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSTSQRTDP_FE, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XSTSQRTDP_FG, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVABSDP, RS6000_BTC_CONST) 
+RS6000_BUILTIN(VSX_BUILTIN_XVABSSP, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XVADDDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVADDSP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCMPEQDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCMPEQSP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCMPGEDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCMPGESP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCMPGTDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCMPGTSP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCMPEQDP_P, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCMPEQSP_P, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCMPGEDP_P, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCMPGESP_P, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCMPGTDP_P, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCMPGTSP_P, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCPSGNDP, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XVCPSGNSP, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XVCVDPSP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCVDPSXDS, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCVDPSXWS, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCVDPUXDS, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCVDPUXDS_UNS, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCVDPUXWS, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCVSPDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCVSPSXDS, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCVSPSXWS, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCVSPUXDS, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCVSPUXWS, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCVSXDDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCVSXDSP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCVSXWDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCVSXWSP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCVUXDDP, RS6000_BTC_FP_PURE) 
+RS6000_BUILTIN(VSX_BUILTIN_XVCVUXDDP_UNS, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCVUXDSP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCVUXWDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVCVUXWSP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVDIVDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVDIVSP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVMADDDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVMADDSP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVMAXDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVMAXSP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVMINDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVMINSP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVMSUBDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVMSUBSP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVMULDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVMULSP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVNABSDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVNABSSP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVNEGDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVNEGSP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVNMADDDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVNMADDSP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVNMSUBDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVNMSUBSP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVRDPI, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVRDPIC, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVRDPIM, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVRDPIP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVRDPIZ, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVREDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVRESP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVRSPI, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVRSPIC, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVRSPIM, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVRSPIP, 
RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVRSPIZ, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVRSQRTEDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVRSQRTESP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVSQRTDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVSQRTSP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVSUBDP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVSUBSP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVTDIVDP_FE, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVTDIVDP_FG, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVTDIVSP_FE, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVTDIVSP_FG, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVTSQRTDP_FE, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVTSQRTDP_FG, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVTSQRTSP_FE, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XVTSQRTSP_FG, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XXSEL_2DI, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XXSEL_2DF, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XXSEL_4SI, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XXSEL_4SF, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XXSEL_8HI, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XXSEL_16QI, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XXSEL_2DI_UNS, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XXSEL_4SI_UNS, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XXSEL_8HI_UNS, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XXSEL_16QI_UNS, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_VPERM_2DI, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_VPERM_2DF, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_VPERM_4SI, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_VPERM_4SF, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_VPERM_8HI, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_VPERM_16QI, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_VPERM_2DI_UNS, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_VPERM_4SI_UNS, RS6000_BTC_CONST) 
+RS6000_BUILTIN(VSX_BUILTIN_VPERM_8HI_UNS, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_VPERM_16QI_UNS, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XXPERMDI_2DF, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XXPERMDI_2DI, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XXPERMDI_4SF, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XXPERMDI_4SI, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XXPERMDI_8HI, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XXPERMDI_16QI, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_CONCAT_2DF, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_CONCAT_2DI, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_SET_2DF, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_SET_2DI, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_SPLAT_2DF, RS6000_BTC_PURE) +RS6000_BUILTIN(VSX_BUILTIN_SPLAT_2DI, RS6000_BTC_PURE) +RS6000_BUILTIN(VSX_BUILTIN_XXMRGHW_4SF, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XXMRGHW_4SI, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XXMRGLW_4SF, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XXMRGLW_4SI, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XXSLDWI_16QI, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XXSLDWI_8HI, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XXSLDWI_4SI, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XXSLDWI_4SF, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XXSLDWI_2DI, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_XXSLDWI_2DF, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_VEC_INIT_V2DF, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_VEC_INIT_V2DI, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_VEC_SET_V2DF, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_VEC_SET_V2DI, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_VEC_EXT_V2DF, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_VEC_EXT_V2DI, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_VEC_MERGEL_V2DF, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_VEC_MERGEL_V2DI, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_VEC_MERGEH_V2DF, RS6000_BTC_CONST) 
+RS6000_BUILTIN(VSX_BUILTIN_VEC_MERGEH_V2DI, RS6000_BTC_CONST) +RS6000_BUILTIN(VSX_BUILTIN_VEC_RSQRT_V4SF, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_VEC_RSQRT_V2DF, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_RECIP_V4SF, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VSX_BUILTIN_RECIP_V2DF, RS6000_BTC_FP_PURE) + +/* VSX overloaded builtins, add the overloaded functions not present in + Altivec. */ +RS6000_BUILTIN(VSX_BUILTIN_VEC_MUL, RS6000_BTC_MISC) +RS6000_BUILTIN_EQUATE(VSX_BUILTIN_OVERLOADED_FIRST, + VSX_BUILTIN_VEC_MUL) +RS6000_BUILTIN(VSX_BUILTIN_VEC_MSUB, RS6000_BTC_MISC) +RS6000_BUILTIN(VSX_BUILTIN_VEC_NMADD, RS6000_BTC_MISC) +RS6000_BUILTIN(VSX_BUITLIN_VEC_NMSUB, RS6000_BTC_MISC) +RS6000_BUILTIN(VSX_BUILTIN_VEC_DIV, RS6000_BTC_MISC) +RS6000_BUILTIN(VSX_BUILTIN_VEC_XXMRGHW, RS6000_BTC_MISC) +RS6000_BUILTIN(VSX_BUILTIN_VEC_XXMRGLW, RS6000_BTC_MISC) +RS6000_BUILTIN(VSX_BUILTIN_VEC_XXPERMDI, RS6000_BTC_MISC) +RS6000_BUILTIN(VSX_BUILTIN_VEC_XXSLDWI, RS6000_BTC_MISC) +RS6000_BUILTIN(VSX_BUILTIN_VEC_XXSPLTD, RS6000_BTC_MISC) +RS6000_BUILTIN(VSX_BUILTIN_VEC_XXSPLTW, RS6000_BTC_MISC) +RS6000_BUILTIN(VSX_BUILTIN_VEC_LD, RS6000_BTC_MISC) +RS6000_BUILTIN(VSX_BUILTIN_VEC_ST, RS6000_BTC_MISC) +RS6000_BUILTIN_EQUATE(VSX_BUILTIN_OVERLOADED_LAST, + VSX_BUILTIN_VEC_ST) + +/* Combined VSX/Altivec builtins. */ +RS6000_BUILTIN(VECTOR_BUILTIN_FLOAT_V4SI_V4SF, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VECTOR_BUILTIN_UNSFLOAT_V4SI_V4SF, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VECTOR_BUILTIN_FIX_V4SF_V4SI, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(VECTOR_BUILTIN_FIXUNS_V4SF_V4SI, RS6000_BTC_FP_PURE) + +/* Power7 builtins, that aren't VSX instructions. */ +RS6000_BUILTIN(POWER7_BUILTIN_BPERMD, RS6000_BTC_CONST) + +/* Miscellaneous builtins. 
*/ +RS6000_BUILTIN(RS6000_BUILTIN_RECIP, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(RS6000_BUILTIN_RECIPF, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(RS6000_BUILTIN_RSQRTF, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(RS6000_BUILTIN_RSQRT, RS6000_BTC_FP_PURE) +RS6000_BUILTIN(RS6000_BUILTIN_BSWAP_HI, RS6000_BTC_CONST) + +/* Darwin CfString builtin. */ +RS6000_BUILTIN(RS6000_BUILTIN_CFSTRING, RS6000_BTC_MISC) diff --git a/gcc/config/rs6000/rs6000-c.c b/gcc/config/rs6000/rs6000-c.c new file mode 100644 index 000000000..3f4f90b23 --- /dev/null +++ b/gcc/config/rs6000/rs6000-c.c @@ -0,0 +1,3772 @@ +/* Subroutines for the C front end on the POWER and PowerPC architectures. + Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 + Free Software Foundation, Inc. + + Contributed by Zack Weinberg + and Paolo Bonzini + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include "tm.h" +#include "cpplib.h" +#include "tree.h" +#include "c-family/c-common.h" +#include "c-family/c-pragma.h" +#include "diagnostic-core.h" +#include "tm_p.h" +#include "target.h" +#include "langhooks.h" + + + +/* Handle the machine specific pragma longcall. Its syntax is + + # pragma longcall ( TOGGLE ) + + where TOGGLE is either 0 or 1. 
+ + rs6000_default_long_calls is set to the value of TOGGLE, changing + whether or not new function declarations receive a longcall + attribute by default. */ + +#define SYNTAX_ERROR(gmsgid) do { \ + warning (OPT_Wpragmas, gmsgid); \ + warning (OPT_Wpragmas, "ignoring malformed #pragma longcall"); \ + return; \ +} while (0) + +void +rs6000_pragma_longcall (cpp_reader *pfile ATTRIBUTE_UNUSED) +{ + tree x, n; + + /* If we get here, generic code has already scanned the directive + leader and the word "longcall". */ + + if (pragma_lex (&x) != CPP_OPEN_PAREN) + SYNTAX_ERROR ("missing open paren"); + if (pragma_lex (&n) != CPP_NUMBER) + SYNTAX_ERROR ("missing number"); + if (pragma_lex (&x) != CPP_CLOSE_PAREN) + SYNTAX_ERROR ("missing close paren"); + + if (n != integer_zero_node && n != integer_one_node) + SYNTAX_ERROR ("number must be 0 or 1"); + + if (pragma_lex (&x) != CPP_EOF) + warning (OPT_Wpragmas, "junk at end of #pragma longcall"); + + rs6000_default_long_calls = (n == integer_one_node); +} + +/* Handle defining many CPP flags based on TARGET_xxx. As a general + policy, rather than trying to guess what flags a user might want a + #define for, it's better to define a flag for everything. */ + +#define builtin_define(TXT) cpp_define (pfile, TXT) +#define builtin_assert(TXT) cpp_assert (pfile, TXT) + +/* Keep the AltiVec keywords handy for fast comparisons. */ +static GTY(()) tree __vector_keyword; +static GTY(()) tree vector_keyword; +static GTY(()) tree __pixel_keyword; +static GTY(()) tree pixel_keyword; +static GTY(()) tree __bool_keyword; +static GTY(()) tree bool_keyword; +static GTY(()) tree _Bool_keyword; + +/* Preserved across calls. 
*/ +static tree expand_bool_pixel; + +static cpp_hashnode * +altivec_categorize_keyword (const cpp_token *tok) +{ + if (tok->type == CPP_NAME) + { + cpp_hashnode *ident = tok->val.node.node; + + if (ident == C_CPP_HASHNODE (vector_keyword)) + return C_CPP_HASHNODE (__vector_keyword); + + if (ident == C_CPP_HASHNODE (pixel_keyword)) + return C_CPP_HASHNODE (__pixel_keyword); + + if (ident == C_CPP_HASHNODE (bool_keyword)) + return C_CPP_HASHNODE (__bool_keyword); + + if (ident == C_CPP_HASHNODE (_Bool_keyword)) + return C_CPP_HASHNODE (__bool_keyword); + + return ident; + } + + return 0; +} + +static void +init_vector_keywords (void) +{ + /* Keywords without two leading underscores are context-sensitive, + and hence implemented as conditional macros, controlled by the + rs6000_macro_to_expand() function below. */ + + __vector_keyword = get_identifier ("__vector"); + C_CPP_HASHNODE (__vector_keyword)->flags |= NODE_CONDITIONAL; + + __pixel_keyword = get_identifier ("__pixel"); + C_CPP_HASHNODE (__pixel_keyword)->flags |= NODE_CONDITIONAL; + + __bool_keyword = get_identifier ("__bool"); + C_CPP_HASHNODE (__bool_keyword)->flags |= NODE_CONDITIONAL; + + vector_keyword = get_identifier ("vector"); + C_CPP_HASHNODE (vector_keyword)->flags |= NODE_CONDITIONAL; + + pixel_keyword = get_identifier ("pixel"); + C_CPP_HASHNODE (pixel_keyword)->flags |= NODE_CONDITIONAL; + + bool_keyword = get_identifier ("bool"); + C_CPP_HASHNODE (bool_keyword)->flags |= NODE_CONDITIONAL; + + _Bool_keyword = get_identifier ("_Bool"); + C_CPP_HASHNODE (_Bool_keyword)->flags |= NODE_CONDITIONAL; +} + +/* Called to decide whether a conditional macro should be expanded. + Since we have exactly one such macro (i.e, 'vector'), we do not + need to examine the 'tok' parameter. 
*/

/* NOTE(review): installed below as the libcpp macro_to_expand callback;
   returns the hash node that should be expanded as a macro, or NULL to
   leave TOK untouched.  */

static cpp_hashnode *
rs6000_macro_to_expand (cpp_reader *pfile, const cpp_token *tok)
{
  cpp_hashnode *expand_this = tok->val.node.node;
  cpp_hashnode *ident;

  ident = altivec_categorize_keyword (tok);

  /* Only expand TOK itself when it is one of the recognized keywords.  */
  if (ident != expand_this)
    expand_this = NULL;

  if (ident == C_CPP_HASHNODE (__vector_keyword))
    {
      int idx = 0;
      /* Peek past any padding tokens to the first real token after
	 'vector'.  */
      do
	tok = cpp_peek_token (pfile, idx++);
      while (tok->type == CPP_PADDING);
      ident = altivec_categorize_keyword (tok);

      if (ident == C_CPP_HASHNODE (__pixel_keyword))
	{
	  expand_this = C_CPP_HASHNODE (__vector_keyword);
	  expand_bool_pixel = __pixel_keyword;
	}
      else if (ident == C_CPP_HASHNODE (__bool_keyword))
	{
	  expand_this = C_CPP_HASHNODE (__vector_keyword);
	  expand_bool_pixel = __bool_keyword;
	}
      else if (ident)
	{
	  enum rid rid_code = (enum rid)(ident->rid_code);
	  if (ident->type == NT_MACRO)
	    {
	      /* The following identifier is itself a macro: consume the
		 tokens peeked so far so it gets expanded, then peek again
		 at what it expanded to.  */
	      do
		(void) cpp_get_token (pfile);
	      while (--idx > 0);
	      do
		tok = cpp_peek_token (pfile, idx++);
	      while (tok->type == CPP_PADDING);
	      ident = altivec_categorize_keyword (tok);
	      if (ident == C_CPP_HASHNODE (__pixel_keyword))
		{
		  expand_this = C_CPP_HASHNODE (__vector_keyword);
		  expand_bool_pixel = __pixel_keyword;
		  rid_code = RID_MAX;
		}
	      else if (ident == C_CPP_HASHNODE (__bool_keyword))
		{
		  expand_this = C_CPP_HASHNODE (__vector_keyword);
		  expand_bool_pixel = __bool_keyword;
		  rid_code = RID_MAX;
		}
	      else if (ident)
		rid_code = (enum rid)(ident->rid_code);
	    }

	  /* 'vector' followed by a basic arithmetic type keyword is the
	     AltiVec type syntax; RID_DOUBLE is only accepted with VSX.  */
	  if (rid_code == RID_UNSIGNED || rid_code == RID_LONG
	      || rid_code == RID_SHORT || rid_code == RID_SIGNED
	      || rid_code == RID_INT || rid_code == RID_CHAR
	      || rid_code == RID_FLOAT
	      || (rid_code == RID_DOUBLE && TARGET_VSX))
	    {
	      expand_this = C_CPP_HASHNODE (__vector_keyword);
	      /* If the next keyword is bool or pixel, it
		 will need to be expanded as well.  */
	      do
		tok = cpp_peek_token (pfile, idx++);
	      while (tok->type == CPP_PADDING);
	      ident = altivec_categorize_keyword (tok);

	      if (ident == C_CPP_HASHNODE (__pixel_keyword))
		expand_bool_pixel = __pixel_keyword;
	      else if (ident == C_CPP_HASHNODE (__bool_keyword))
		expand_bool_pixel = __bool_keyword;
	      else
		{
		  /* Try two tokens down, too.  */
		  do
		    tok = cpp_peek_token (pfile, idx++);
		  while (tok->type == CPP_PADDING);
		  ident = altivec_categorize_keyword (tok);
		  if (ident == C_CPP_HASHNODE (__pixel_keyword))
		    expand_bool_pixel = __pixel_keyword;
		  else if (ident == C_CPP_HASHNODE (__bool_keyword))
		    expand_bool_pixel = __bool_keyword;
		}
	    }
	}
    }
  /* A pending bool/pixel recorded by an earlier 'vector' expansion is
     expanded now and the pending state cleared.  */
  else if (expand_bool_pixel && ident == C_CPP_HASHNODE (__pixel_keyword))
    {
      expand_this = C_CPP_HASHNODE (__pixel_keyword);
      expand_bool_pixel = 0;
    }
  else if (expand_bool_pixel && ident == C_CPP_HASHNODE (__bool_keyword))
    {
      expand_this = C_CPP_HASHNODE (__bool_keyword);
      expand_bool_pixel = 0;
    }

  return expand_this;
}

/* Define the preprocessor macros that describe the target CPU, ABI and
   enabled vector/float facilities for this translation unit.  */

void
rs6000_cpu_cpp_builtins (cpp_reader *pfile)
{
  /* Architecture-level macros; PWR4..PWR7 are keyed off the presence of
     the instructions each level introduced.  */
  if (TARGET_POWER2)
    builtin_define ("_ARCH_PWR2");
  else if (TARGET_POWER)
    builtin_define ("_ARCH_PWR");
  if (TARGET_POWERPC)
    builtin_define ("_ARCH_PPC");
  if (TARGET_PPC_GPOPT)
    builtin_define ("_ARCH_PPCSQ");
  if (TARGET_PPC_GFXOPT)
    builtin_define ("_ARCH_PPCGR");
  if (TARGET_POWERPC64)
    builtin_define ("_ARCH_PPC64");
  if (TARGET_MFCRF)
    builtin_define ("_ARCH_PWR4");
  if (TARGET_POPCNTB)
    builtin_define ("_ARCH_PWR5");
  if (TARGET_FPRND)
    builtin_define ("_ARCH_PWR5X");
  if (TARGET_CMPB)
    builtin_define ("_ARCH_PWR6");
  if (TARGET_MFPGPR)
    builtin_define ("_ARCH_PWR6X");
  if (! TARGET_POWER && ! TARGET_POWER2 && ! TARGET_POWERPC)
    builtin_define ("_ARCH_COM");
  if (TARGET_POPCNTD)
    builtin_define ("_ARCH_PWR7");
  if (TARGET_ALTIVEC)
    {
      builtin_define ("__ALTIVEC__");
      builtin_define ("__VEC__=10206");

      /* Define the AltiVec syntactic elements.
*/
      /* The double-underscore spellings are always available and map to
	 the altivec attribute forms.  */
      builtin_define ("__vector=__attribute__((altivec(vector__)))");
      builtin_define ("__pixel=__attribute__((altivec(pixel__))) unsigned short");
      builtin_define ("__bool=__attribute__((altivec(bool__))) unsigned");

      /* The bare spellings are context-sensitive keywords, only enabled
	 outside strict ISO mode.  */
      if (!flag_iso)
	{
	  /* Define this when supporting context-sensitive keywords.  */
	  builtin_define ("__APPLE_ALTIVEC__");

	  /* Self-expanding placeholders; actual expansion is gated by
	     rs6000_macro_to_expand.  */
	  builtin_define ("vector=vector");
	  builtin_define ("pixel=pixel");
	  builtin_define ("bool=bool");
	  builtin_define ("_Bool=_Bool");
	  init_vector_keywords ();

	  /* Enable context-sensitive macros.  */
	  cpp_get_callbacks (pfile)->macro_to_expand = rs6000_macro_to_expand;
	}
    }
  if (rs6000_cpu == PROCESSOR_CELL)
    builtin_define ("__PPU__");
  if (TARGET_SPE)
    builtin_define ("__SPE__");
  if (TARGET_PAIRED_FLOAT)
    builtin_define ("__PAIRED__");
  if (TARGET_SOFT_FLOAT)
    builtin_define ("_SOFT_FLOAT");
  /* Double is software-emulated either with full soft float, or when
     hardware FPRs only support single precision.  */
  if ((!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
      ||(TARGET_HARD_FLOAT && TARGET_FPRS && !TARGET_DOUBLE_FLOAT))
    builtin_define ("_SOFT_DOUBLE");
  /* Used by lwarx/stwcx. errata work-around.  */
  if (rs6000_cpu == PROCESSOR_PPC405)
    builtin_define ("__PPC405__");
  /* Used by libstdc++.  */
  if (TARGET_NO_LWSYNC)
    builtin_define ("__NO_LWSYNC__");
  if (TARGET_VSX)
    {
      builtin_define ("__VSX__");

      /* For the VSX builtin functions identical to Altivec functions, just map
	 the altivec builtin into the vsx version (the altivec functions
	 generate VSX code if -mvsx).
*/
      builtin_define ("__builtin_vsx_xxland=__builtin_vec_and");
      builtin_define ("__builtin_vsx_xxlandc=__builtin_vec_andc");
      builtin_define ("__builtin_vsx_xxlnor=__builtin_vec_nor");
      builtin_define ("__builtin_vsx_xxlor=__builtin_vec_or");
      builtin_define ("__builtin_vsx_xxlxor=__builtin_vec_xor");
      builtin_define ("__builtin_vsx_xxsel=__builtin_vec_sel");
      builtin_define ("__builtin_vsx_vperm=__builtin_vec_perm");

      /* Also map the a and m versions of the multiply/add instructions to the
	 builtin for people blindly going off the instruction manual.  */
      builtin_define ("__builtin_vsx_xvmaddadp=__builtin_vsx_xvmadddp");
      builtin_define ("__builtin_vsx_xvmaddmdp=__builtin_vsx_xvmadddp");
      builtin_define ("__builtin_vsx_xvmaddasp=__builtin_vsx_xvmaddsp");
      builtin_define ("__builtin_vsx_xvmaddmsp=__builtin_vsx_xvmaddsp");
      builtin_define ("__builtin_vsx_xvmsubadp=__builtin_vsx_xvmsubdp");
      builtin_define ("__builtin_vsx_xvmsubmdp=__builtin_vsx_xvmsubdp");
      builtin_define ("__builtin_vsx_xvmsubasp=__builtin_vsx_xvmsubsp");
      builtin_define ("__builtin_vsx_xvmsubmsp=__builtin_vsx_xvmsubsp");
      builtin_define ("__builtin_vsx_xvnmaddadp=__builtin_vsx_xvnmadddp");
      builtin_define ("__builtin_vsx_xvnmaddmdp=__builtin_vsx_xvnmadddp");
      builtin_define ("__builtin_vsx_xvnmaddasp=__builtin_vsx_xvnmaddsp");
      builtin_define ("__builtin_vsx_xvnmaddmsp=__builtin_vsx_xvnmaddsp");
      builtin_define ("__builtin_vsx_xvnmsubadp=__builtin_vsx_xvnmsubdp");
      builtin_define ("__builtin_vsx_xvnmsubmdp=__builtin_vsx_xvnmsubdp");
      builtin_define ("__builtin_vsx_xvnmsubasp=__builtin_vsx_xvnmsubsp");
      builtin_define ("__builtin_vsx_xvnmsubmsp=__builtin_vsx_xvnmsubsp");
    }
  /* Advertise availability of the reciprocal-estimate instructions.  */
  if (RS6000_RECIP_HAVE_RE_P (DFmode))
    builtin_define ("__RECIP__");
  if (RS6000_RECIP_HAVE_RE_P (SFmode))
    builtin_define ("__RECIPF__");
  if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode))
    builtin_define ("__RSQRTE__");
  if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode))
    builtin_define ("__RSQRTEF__");
  if (TARGET_RECIP_PRECISION)
    builtin_define ("__RECIP_PRECISION__");

  /* Tell users they can use __builtin_bswap{16,64}.  */
  builtin_define ("__HAVE_BSWAP__");

  /* May be overridden by target configuration.  */
  RS6000_CPU_CPP_ENDIAN_BUILTINS();

  if (TARGET_LONG_DOUBLE_128)
    {
      builtin_define ("__LONG_DOUBLE_128__");
      builtin_define ("__LONGDOUBLE128");
    }

  switch (TARGET_CMODEL)
    {
      /* Deliberately omit __CMODEL_SMALL__ since that was the default
	 before --mcmodel support was added.  */
    case CMODEL_MEDIUM:
      builtin_define ("__CMODEL_MEDIUM__");
      break;
    case CMODEL_LARGE:
      builtin_define ("__CMODEL_LARGE__");
      break;
    default:
      break;
    }

  switch (rs6000_current_abi)
    {
    case ABI_V4:
      builtin_define ("_CALL_SYSV");
      break;
    case ABI_AIX:
      builtin_define ("_CALL_AIXDESC");
      builtin_define ("_CALL_AIX");
      break;
    case ABI_DARWIN:
      builtin_define ("_CALL_DARWIN");
      break;
    default:
      break;
    }

  /* Let the compiled code know if 'f' class registers will not be
     available.  */
  if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
    builtin_define ("__NO_FPRS__");

  /* Generate defines for Xilinx FPU. */
  if (rs6000_xilinx_fpu)
    {
      builtin_define ("_XFPU");
      if (rs6000_single_float && ! rs6000_double_float)
	{
	  if (rs6000_simple_fpu)
	    builtin_define ("_XFPU_SP_LITE");
	  else
	    builtin_define ("_XFPU_SP_FULL");
	}
      if (rs6000_double_float)
	{
	  if (rs6000_simple_fpu)
	    builtin_define ("_XFPU_DP_LITE");
	  else
	    builtin_define ("_XFPU_DP_FULL");
	}
    }
}


/* One entry in the overloaded-builtin resolution table: an overloaded
   builtin code, the specific builtin it resolves to for this operand
   signature, and the return/operand type indices (RS6000_BTI_* values;
   0 marks an unused operand slot).  */
struct altivec_builtin_types
{
  enum rs6000_builtins code;		/* Overloaded builtin.  */
  enum rs6000_builtins overloaded_code;	/* Resolved specific builtin.  */
  signed char ret_type;			/* Return type index.  */
  signed char op1;			/* Operand type indices.  */
  signed char op2;
  signed char op3;
};

const struct altivec_builtin_types altivec_overloaded_builtins[] = {
  /* Unary AltiVec/VSX builtins.
*/ + { ALTIVEC_BUILTIN_VEC_ABS, ALTIVEC_BUILTIN_ABS_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_ABS, ALTIVEC_BUILTIN_ABS_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_ABS, ALTIVEC_BUILTIN_ABS_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_ABS, ALTIVEC_BUILTIN_ABS_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_ABS, VSX_BUILTIN_XVABSDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_ABSS, ALTIVEC_BUILTIN_ABSS_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_ABSS, ALTIVEC_BUILTIN_ABSS_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_ABSS, ALTIVEC_BUILTIN_ABSS_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_CEIL, ALTIVEC_BUILTIN_VRFIP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_CEIL, VSX_BUILTIN_XVRDPIP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_EXPTE, ALTIVEC_BUILTIN_VEXPTEFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_FLOOR, VSX_BUILTIN_XVRDPIM, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_FLOOR, ALTIVEC_BUILTIN_VRFIM, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_LOGE, ALTIVEC_BUILTIN_VLOGEFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR, + RS6000_BTI_void, RS6000_BTI_V4SI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR, + RS6000_BTI_void, RS6000_BTI_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR, + RS6000_BTI_void, 
RS6000_BTI_bool_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR, + RS6000_BTI_void, RS6000_BTI_pixel_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR, + RS6000_BTI_void, RS6000_BTI_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_RE, ALTIVEC_BUILTIN_VREFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_RE, VSX_BUILTIN_XVREDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_ROUND, ALTIVEC_BUILTIN_VRFIN, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_RECIP, ALTIVEC_BUILTIN_VRECIPFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_RECIP, VSX_BUILTIN_RECIP_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_RSQRT, ALTIVEC_BUILTIN_VRSQRTFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_RSQRT, VSX_BUILTIN_VEC_RSQRT_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_RSQRTE, ALTIVEC_BUILTIN_VRSQRTEFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_RSQRTE, VSX_BUILTIN_XVRSQRTEDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_TRUNC, ALTIVEC_BUILTIN_VRFIZ, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_TRUNC, VSX_BUILTIN_XVRDPIZ, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_UNPACKH, ALTIVEC_BUILTIN_VUPKHSB, + RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_UNPACKH, ALTIVEC_BUILTIN_VUPKHSB, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_UNPACKH, ALTIVEC_BUILTIN_VUPKHSH, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_UNPACKH, ALTIVEC_BUILTIN_VUPKHSH, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V8HI, 0, 0 }, + 
{ ALTIVEC_BUILTIN_VEC_UNPACKH, ALTIVEC_BUILTIN_VUPKHPX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_pixel_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VUPKHSH, ALTIVEC_BUILTIN_VUPKHSH, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VUPKHSH, ALTIVEC_BUILTIN_VUPKHSH, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VUPKHPX, ALTIVEC_BUILTIN_VUPKHPX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VUPKHPX, ALTIVEC_BUILTIN_VUPKHPX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_pixel_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VUPKHSB, ALTIVEC_BUILTIN_VUPKHSB, + RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VUPKHSB, ALTIVEC_BUILTIN_VUPKHSB, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_UNPACKL, ALTIVEC_BUILTIN_VUPKLSB, + RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_UNPACKL, ALTIVEC_BUILTIN_VUPKLSB, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_UNPACKL, ALTIVEC_BUILTIN_VUPKLPX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_pixel_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_UNPACKL, ALTIVEC_BUILTIN_VUPKLSH, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_UNPACKL, ALTIVEC_BUILTIN_VUPKLSH, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VUPKLPX, ALTIVEC_BUILTIN_VUPKLPX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VUPKLPX, ALTIVEC_BUILTIN_VUPKLPX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_pixel_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VUPKLSH, ALTIVEC_BUILTIN_VUPKLSH, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VUPKLSH, ALTIVEC_BUILTIN_VUPKLSH, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VUPKLSB, ALTIVEC_BUILTIN_VUPKLSB, + RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VUPKLSB, ALTIVEC_BUILTIN_VUPKLSB, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V16QI, 0, 0 }, + + 
/* Binary AltiVec/VSX builtins. */ + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_unsigned_V4SI, 
RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, VSX_BUILTIN_XVADDDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDFP, ALTIVEC_BUILTIN_VADDFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUHM, 
ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDC, ALTIVEC_BUILTIN_VADDCUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { 
ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSBS, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSBS, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSBS, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSHS, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSHS, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSHS, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, 
ALTIVEC_BUILTIN_VADDSWS, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSWS, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSWS, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDSWS, ALTIVEC_BUILTIN_VADDSWS, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDSWS, ALTIVEC_BUILTIN_VADDSWS, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDSWS, ALTIVEC_BUILTIN_VADDSWS, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUWS, ALTIVEC_BUILTIN_VADDUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUWS, ALTIVEC_BUILTIN_VADDUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUWS, ALTIVEC_BUILTIN_VADDUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUWS, ALTIVEC_BUILTIN_VADDUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUWS, ALTIVEC_BUILTIN_VADDUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDSHS, ALTIVEC_BUILTIN_VADDSHS, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDSHS, ALTIVEC_BUILTIN_VADDSHS, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDSHS, ALTIVEC_BUILTIN_VADDSHS, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUHS, ALTIVEC_BUILTIN_VADDUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUHS, ALTIVEC_BUILTIN_VADDUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 }, + { 
ALTIVEC_BUILTIN_VEC_VADDUHS, ALTIVEC_BUILTIN_VADDUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUHS, ALTIVEC_BUILTIN_VADDUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUHS, ALTIVEC_BUILTIN_VADDUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDSBS, ALTIVEC_BUILTIN_VADDSBS, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDSBS, ALTIVEC_BUILTIN_VADDSBS, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDSBS, ALTIVEC_BUILTIN_VADDSBS, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUBS, ALTIVEC_BUILTIN_VADDUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUBS, ALTIVEC_BUILTIN_VADDUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUBS, ALTIVEC_BUILTIN_VADDUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUBS, ALTIVEC_BUILTIN_VADDUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUBS, ALTIVEC_BUILTIN_VADDUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + 
RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + RS6000_BTI_V2DF, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 
RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + RS6000_BTI_V2DF, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + 
RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { 
ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGSB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGSH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGSW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VAVGSW, ALTIVEC_BUILTIN_VAVGSW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VAVGUW, ALTIVEC_BUILTIN_VAVGUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VAVGSH, ALTIVEC_BUILTIN_VAVGSH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VAVGUH, ALTIVEC_BUILTIN_VAVGUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VAVGSB, ALTIVEC_BUILTIN_VAVGSB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VAVGUB, ALTIVEC_BUILTIN_VAVGUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPB, ALTIVEC_BUILTIN_VCMPBFP, + RS6000_BTI_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUB, + RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUB, + RS6000_BTI_bool_V16QI, 
RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUH, + RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUH, + RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUW, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUW, + RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQFP, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPEQ, VSX_BUILTIN_XVCMPEQDP, + RS6000_BTI_bool_V2DI, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPEQFP, ALTIVEC_BUILTIN_VCMPEQFP, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + + { ALTIVEC_BUILTIN_VEC_VCMPEQUW, ALTIVEC_BUILTIN_VCMPEQUW, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPEQUW, ALTIVEC_BUILTIN_VCMPEQUW, + RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + + { ALTIVEC_BUILTIN_VEC_VCMPEQUH, ALTIVEC_BUILTIN_VCMPEQUH, + RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPEQUH, ALTIVEC_BUILTIN_VCMPEQUH, + RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + + { ALTIVEC_BUILTIN_VEC_VCMPEQUB, ALTIVEC_BUILTIN_VCMPEQUB, + RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPEQUB, ALTIVEC_BUILTIN_VCMPEQUB, + RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + + { ALTIVEC_BUILTIN_VEC_CMPGE, ALTIVEC_BUILTIN_VCMPGEFP, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPGE, VSX_BUILTIN_XVCMPGEDP, + RS6000_BTI_bool_V2DI, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { 
ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTUB, + RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTSB, + RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTUH, + RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTSH, + RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTUW, + RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTSW, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTFP, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPGT, VSX_BUILTIN_XVCMPGTDP, + RS6000_BTI_bool_V2DI, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPGTFP, ALTIVEC_BUILTIN_VCMPGTFP, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPGTSW, ALTIVEC_BUILTIN_VCMPGTSW, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPGTSW, ALTIVEC_BUILTIN_VCMPGTSW, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPGTUW, ALTIVEC_BUILTIN_VCMPGTUW, + RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPGTUW, ALTIVEC_BUILTIN_VCMPGTUW, + RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPGTSH, ALTIVEC_BUILTIN_VCMPGTSH, + RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPGTSH, ALTIVEC_BUILTIN_VCMPGTSH, + RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPGTUH, ALTIVEC_BUILTIN_VCMPGTUH, + RS6000_BTI_bool_V8HI, 
RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPGTUH, ALTIVEC_BUILTIN_VCMPGTUH, + RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPGTSB, ALTIVEC_BUILTIN_VCMPGTSB, + RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPGTSB, ALTIVEC_BUILTIN_VCMPGTSB, + RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPGTUB, ALTIVEC_BUILTIN_VCMPGTUB, + RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPGTUB, ALTIVEC_BUILTIN_VCMPGTUB, + RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPLE, ALTIVEC_BUILTIN_VCMPGEFP, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPLE, VSX_BUILTIN_XVCMPGEDP, + RS6000_BTI_bool_V2DI, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTUB, + RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTSB, + RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTUH, + RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTSH, + RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTUW, + RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTSW, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTFP, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPLT, VSX_BUILTIN_XVCMPGTDP, + RS6000_BTI_bool_V2DI, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { 
ALTIVEC_BUILTIN_VEC_COPYSIGN, VSX_BUILTIN_CPSGNDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_COPYSIGN, ALTIVEC_BUILTIN_COPYSIGN_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_CTF, ALTIVEC_BUILTIN_VCFUX, + RS6000_BTI_V4SF, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_CTF, ALTIVEC_BUILTIN_VCFSX, + RS6000_BTI_V4SF, RS6000_BTI_V4SI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_VCFSX, ALTIVEC_BUILTIN_VCFSX, + RS6000_BTI_V4SF, RS6000_BTI_V4SI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_VCFUX, ALTIVEC_BUILTIN_VCFUX, + RS6000_BTI_V4SF, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_CTS, ALTIVEC_BUILTIN_VCTSXS, + RS6000_BTI_V4SI, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_CTU, ALTIVEC_BUILTIN_VCTUXS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 0 }, + { VSX_BUILTIN_VEC_DIV, VSX_BUILTIN_XVDIVSP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { VSX_BUILTIN_VEC_DIV, VSX_BUILTIN_XVDIVDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX, + RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX, + RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX, + RS6000_BTI_bool_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX, + RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX, + 
RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX, + RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX, + RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX, + RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDE, 
ALTIVEC_BUILTIN_LVEBX, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEBX, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEHX, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEHX, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEWX, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 }, + { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEWX, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEWX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEWX, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEWX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LVEWX, ALTIVEC_BUILTIN_LVEWX, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 }, + { ALTIVEC_BUILTIN_VEC_LVEWX, ALTIVEC_BUILTIN_LVEWX, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVEWX, ALTIVEC_BUILTIN_LVEWX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVEWX, ALTIVEC_BUILTIN_LVEWX, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LVEWX, ALTIVEC_BUILTIN_LVEWX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LVEHX, ALTIVEC_BUILTIN_LVEHX, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVEHX, ALTIVEC_BUILTIN_LVEHX, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVEBX, ALTIVEC_BUILTIN_LVEBX, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 }, + { 
ALTIVEC_BUILTIN_VEC_LVEBX, ALTIVEC_BUILTIN_LVEBX, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL, + RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL, + RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL, + RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL, + RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, 
~RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL, + RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL, + RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL, + RS6000_BTI_bool_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 }, + { 
ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_double, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTDI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTDI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_long_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_long_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_double, 0 }, + { 
ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTDI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTDI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_long_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_long_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + 
RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 }, + { 
ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + 
RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 }, + { 
ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSB, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { 
ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSH, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSW, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, VSX_BUILTIN_XVMAXDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXFP, ALTIVEC_BUILTIN_VMAXFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXSW, ALTIVEC_BUILTIN_VMAXSW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXSW, ALTIVEC_BUILTIN_VMAXSW, + 
RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXSW, ALTIVEC_BUILTIN_VMAXSW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUW, ALTIVEC_BUILTIN_VMAXUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUW, ALTIVEC_BUILTIN_VMAXUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUW, ALTIVEC_BUILTIN_VMAXUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUW, ALTIVEC_BUILTIN_VMAXUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUW, ALTIVEC_BUILTIN_VMAXUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXSH, ALTIVEC_BUILTIN_VMAXSH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXSH, ALTIVEC_BUILTIN_VMAXSH, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXSH, ALTIVEC_BUILTIN_VMAXSH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUH, ALTIVEC_BUILTIN_VMAXUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUH, ALTIVEC_BUILTIN_VMAXUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUH, ALTIVEC_BUILTIN_VMAXUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUH, ALTIVEC_BUILTIN_VMAXUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUH, ALTIVEC_BUILTIN_VMAXUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXSB, ALTIVEC_BUILTIN_VMAXSB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, 
RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXSB, ALTIVEC_BUILTIN_VMAXSB, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXSB, ALTIVEC_BUILTIN_VMAXSB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUB, ALTIVEC_BUILTIN_VMAXUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUB, ALTIVEC_BUILTIN_VMAXUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUB, ALTIVEC_BUILTIN_VMAXUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUB, ALTIVEC_BUILTIN_VMAXUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUB, ALTIVEC_BUILTIN_VMAXUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHB, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHH, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHH, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHW, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { 
ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHW, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, VSX_BUILTIN_VEC_MERGEH_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, VSX_BUILTIN_VEC_MERGEH_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGHW, ALTIVEC_BUILTIN_VMRGHW, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGHW, ALTIVEC_BUILTIN_VMRGHW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGHW, ALTIVEC_BUILTIN_VMRGHW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGHW, ALTIVEC_BUILTIN_VMRGHW, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGHH, ALTIVEC_BUILTIN_VMRGHH, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGHH, ALTIVEC_BUILTIN_VMRGHH, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGHH, ALTIVEC_BUILTIN_VMRGHH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGHH, ALTIVEC_BUILTIN_VMRGHH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGHB, ALTIVEC_BUILTIN_VMRGHB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGHB, ALTIVEC_BUILTIN_VMRGHB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGHB, ALTIVEC_BUILTIN_VMRGHB, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 
RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLB, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLH, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLH, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLW, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLW, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, VSX_BUILTIN_VEC_MERGEL_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, VSX_BUILTIN_VEC_MERGEL_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGLW, ALTIVEC_BUILTIN_VMRGLW, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGLW, ALTIVEC_BUILTIN_VMRGLW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGLW, ALTIVEC_BUILTIN_VMRGLW, + RS6000_BTI_unsigned_V4SI, 
RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGLW, ALTIVEC_BUILTIN_VMRGLW, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGLH, ALTIVEC_BUILTIN_VMRGLH, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGLH, ALTIVEC_BUILTIN_VMRGLH, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGLH, ALTIVEC_BUILTIN_VMRGLH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGLH, ALTIVEC_BUILTIN_VMRGLH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGLB, ALTIVEC_BUILTIN_VMRGLB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGLB, ALTIVEC_BUILTIN_VMRGLB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGLB, ALTIVEC_BUILTIN_VMRGLB, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSB, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { 
ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSH, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSW, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, VSX_BUILTIN_XVMINDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINFP, ALTIVEC_BUILTIN_VMINFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINSW, ALTIVEC_BUILTIN_VMINSW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINSW, ALTIVEC_BUILTIN_VMINSW, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINSW, ALTIVEC_BUILTIN_VMINSW, + 
RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUW, ALTIVEC_BUILTIN_VMINUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUW, ALTIVEC_BUILTIN_VMINUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUW, ALTIVEC_BUILTIN_VMINUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUW, ALTIVEC_BUILTIN_VMINUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUW, ALTIVEC_BUILTIN_VMINUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINSH, ALTIVEC_BUILTIN_VMINSH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINSH, ALTIVEC_BUILTIN_VMINSH, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINSH, ALTIVEC_BUILTIN_VMINSH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINSB, ALTIVEC_BUILTIN_VMINSB, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINSB, ALTIVEC_BUILTIN_VMINSB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINSB, ALTIVEC_BUILTIN_VMINSB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUH, ALTIVEC_BUILTIN_VMINUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUH, ALTIVEC_BUILTIN_VMINUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUH, ALTIVEC_BUILTIN_VMINUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUH, ALTIVEC_BUILTIN_VMINUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { 
ALTIVEC_BUILTIN_VEC_VMINUH, ALTIVEC_BUILTIN_VMINUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUB, ALTIVEC_BUILTIN_VMINUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUB, ALTIVEC_BUILTIN_VMINUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUB, ALTIVEC_BUILTIN_VMINUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUB, ALTIVEC_BUILTIN_VMINUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUB, ALTIVEC_BUILTIN_VMINUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { VSX_BUILTIN_VEC_MUL, VSX_BUILTIN_XVMULSP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { VSX_BUILTIN_VEC_MUL, VSX_BUILTIN_XVMULDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_MULE, ALTIVEC_BUILTIN_VMULEUB, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MULE, ALTIVEC_BUILTIN_VMULESB, + RS6000_BTI_V8HI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MULE, ALTIVEC_BUILTIN_VMULEUH, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MULE, ALTIVEC_BUILTIN_VMULESH, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMULEUB, ALTIVEC_BUILTIN_VMULEUB, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMULESB, ALTIVEC_BUILTIN_VMULESB, + RS6000_BTI_V8HI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMULEUH, ALTIVEC_BUILTIN_VMULEUH, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMULESH, 
ALTIVEC_BUILTIN_VMULESH, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MULO, ALTIVEC_BUILTIN_VMULOUB, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MULO, ALTIVEC_BUILTIN_VMULOSB, + RS6000_BTI_V8HI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MULO, ALTIVEC_BUILTIN_VMULOUH, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MULO, ALTIVEC_BUILTIN_VMULOSH, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMULOSH, ALTIVEC_BUILTIN_VMULOSH, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMULOUH, ALTIVEC_BUILTIN_VMULOUH, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMULOSB, ALTIVEC_BUILTIN_VMULOSB, + RS6000_BTI_V8HI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMULOUB, ALTIVEC_BUILTIN_VMULOUB, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_NEARBYINT, VSX_BUILTIN_XVRDPI, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_NEARBYINT, VSX_BUILTIN_XVRSPI, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, 
RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_V2DF, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_unsigned_V4SI, 
RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUHUM, + RS6000_BTI_V16QI, RS6000_BTI_V8HI, 
RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUHUM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUHUM, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUWUM, + RS6000_BTI_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUWUM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUWUM, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VPKUWUM, ALTIVEC_BUILTIN_VPKUWUM, + RS6000_BTI_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VPKUWUM, ALTIVEC_BUILTIN_VPKUWUM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VPKUWUM, ALTIVEC_BUILTIN_VPKUWUM, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VPKUHUM, ALTIVEC_BUILTIN_VPKUHUM, + RS6000_BTI_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VPKUHUM, ALTIVEC_BUILTIN_VPKUHUM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VPKUHUM, ALTIVEC_BUILTIN_VPKUHUM, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACKPX, ALTIVEC_BUILTIN_VPKPX, + RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACKS, ALTIVEC_BUILTIN_VPKUHUS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACKS, ALTIVEC_BUILTIN_VPKSHSS, + RS6000_BTI_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACKS, ALTIVEC_BUILTIN_VPKUWUS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { 
ALTIVEC_BUILTIN_VEC_PACKS, ALTIVEC_BUILTIN_VPKSWSS, + RS6000_BTI_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VPKSWSS, ALTIVEC_BUILTIN_VPKSWSS, + RS6000_BTI_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VPKUWUS, ALTIVEC_BUILTIN_VPKUWUS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VPKSHSS, ALTIVEC_BUILTIN_VPKSHSS, + RS6000_BTI_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VPKUHUS, ALTIVEC_BUILTIN_VPKUHUS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACKSU, ALTIVEC_BUILTIN_VPKUHUS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACKSU, ALTIVEC_BUILTIN_VPKSHUS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACKSU, ALTIVEC_BUILTIN_VPKUWUS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACKSU, ALTIVEC_BUILTIN_VPKSWUS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VPKSWUS, ALTIVEC_BUILTIN_VPKSWUS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VPKSHUS, ALTIVEC_BUILTIN_VPKSHUS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_RINT, VSX_BUILTIN_XVRDPIC, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_RINT, VSX_BUILTIN_XVRSPIC, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { 
ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VRLW, ALTIVEC_BUILTIN_VRLW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VRLW, ALTIVEC_BUILTIN_VRLW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VRLH, ALTIVEC_BUILTIN_VRLH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VRLH, ALTIVEC_BUILTIN_VRLH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VRLB, ALTIVEC_BUILTIN_VRLB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VRLB, ALTIVEC_BUILTIN_VRLB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SQRT, VSX_BUILTIN_XVSQRTDP, + RS6000_BTI_V2DF, 
RS6000_BTI_V2DF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_SQRT, VSX_BUILTIN_XVSQRTSP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VSLW, ALTIVEC_BUILTIN_VSLW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSLW, ALTIVEC_BUILTIN_VSLW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSLH, ALTIVEC_BUILTIN_VSLH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSLH, ALTIVEC_BUILTIN_VSLH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSLB, ALTIVEC_BUILTIN_VSLB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSLB, ALTIVEC_BUILTIN_VSLB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_bool_V4SI, 
RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + 
RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, 
ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTB, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTH, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTH, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTW, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_SPLAT, 
ALTIVEC_BUILTIN_VSPLTW, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSPLTW, ALTIVEC_BUILTIN_VSPLTW, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSPLTW, ALTIVEC_BUILTIN_VSPLTW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSPLTW, ALTIVEC_BUILTIN_VSPLTW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSPLTW, ALTIVEC_BUILTIN_VSPLTW, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSPLTH, ALTIVEC_BUILTIN_VSPLTH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSPLTH, ALTIVEC_BUILTIN_VSPLTH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSPLTH, ALTIVEC_BUILTIN_VSPLTH, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSPLTH, ALTIVEC_BUILTIN_VSPLTH, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSPLTB, ALTIVEC_BUILTIN_VSPLTB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSPLTB, ALTIVEC_BUILTIN_VSPLTB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSPLTB, ALTIVEC_BUILTIN_VSPLTB, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SR, 
ALTIVEC_BUILTIN_VSRW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSRW, ALTIVEC_BUILTIN_VSRW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSRW, ALTIVEC_BUILTIN_VSRW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSRH, ALTIVEC_BUILTIN_VSRH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSRH, ALTIVEC_BUILTIN_VSRH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSRB, ALTIVEC_BUILTIN_VSRB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSRB, ALTIVEC_BUILTIN_VSRB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSRAW, ALTIVEC_BUILTIN_VSRAW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSRAW, ALTIVEC_BUILTIN_VSRAW, + RS6000_BTI_unsigned_V4SI, 
RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSRAH, ALTIVEC_BUILTIN_VSRAH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSRAH, ALTIVEC_BUILTIN_VSRAH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSRAB, ALTIVEC_BUILTIN_VSRAB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSRAB, ALTIVEC_BUILTIN_VSRAB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, 
ALTIVEC_BUILTIN_VSR, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_unsigned_V16QI, 
RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_V16QI, 
RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, 
ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, VSX_BUILTIN_XVSUBDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBFP, ALTIVEC_BUILTIN_VSUBFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUWM, 
ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUBM, 
ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBC, ALTIVEC_BUILTIN_VSUBCUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSBS, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSBS, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSBS, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSHS, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSHS, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSHS, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUWS, + 
RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSWS, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSWS, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSWS, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBSWS, ALTIVEC_BUILTIN_VSUBSWS, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBSWS, ALTIVEC_BUILTIN_VSUBSWS, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBSWS, ALTIVEC_BUILTIN_VSUBSWS, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUWS, ALTIVEC_BUILTIN_VSUBUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUWS, ALTIVEC_BUILTIN_VSUBUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUWS, ALTIVEC_BUILTIN_VSUBUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUWS, ALTIVEC_BUILTIN_VSUBUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUWS, ALTIVEC_BUILTIN_VSUBUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBSHS, ALTIVEC_BUILTIN_VSUBSHS, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBSHS, ALTIVEC_BUILTIN_VSUBSHS, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { 
ALTIVEC_BUILTIN_VEC_VSUBSHS, ALTIVEC_BUILTIN_VSUBSHS, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUHS, ALTIVEC_BUILTIN_VSUBUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUHS, ALTIVEC_BUILTIN_VSUBUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUHS, ALTIVEC_BUILTIN_VSUBUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUHS, ALTIVEC_BUILTIN_VSUBUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUHS, ALTIVEC_BUILTIN_VSUBUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBSBS, ALTIVEC_BUILTIN_VSUBSBS, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBSBS, ALTIVEC_BUILTIN_VSUBSBS, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBSBS, ALTIVEC_BUILTIN_VSUBSBS, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUBS, ALTIVEC_BUILTIN_VSUBUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUBS, ALTIVEC_BUILTIN_VSUBUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUBS, ALTIVEC_BUILTIN_VSUBUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUBS, ALTIVEC_BUILTIN_VSUBUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUBS, ALTIVEC_BUILTIN_VSUBUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUM4S, ALTIVEC_BUILTIN_VSUM4UBS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, 
RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUM4S, ALTIVEC_BUILTIN_VSUM4SBS, + RS6000_BTI_V4SI, RS6000_BTI_V16QI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUM4S, ALTIVEC_BUILTIN_VSUM4SHS, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUM4SHS, ALTIVEC_BUILTIN_VSUM4SHS, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUM4SBS, ALTIVEC_BUILTIN_VSUM4SBS, + RS6000_BTI_V4SI, RS6000_BTI_V16QI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUM4UBS, ALTIVEC_BUILTIN_VSUM4UBS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUM2S, ALTIVEC_BUILTIN_VSUM2SWS, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUMS, ALTIVEC_BUILTIN_VSUMSWS, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_V2DF, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, 
RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 
RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + + /* Ternary AltiVec/VSX builtins. */ + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_V4SF, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_UINTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_INTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_UINTHI, RS6000_BTI_INTSI, 
RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_INTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_UINTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_unsigned_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_float, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { 
ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_V4SF, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_UINTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_INTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_UINTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_INTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_UINTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_unsigned_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_float, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { 
ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_V4SF, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_UINTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_INTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_UINTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_INTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_UINTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_unsigned_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_long, RS6000_BTI_INTSI, 
RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_float, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_V4SF, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_UINTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_INTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_UINTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { 
ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_INTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_UINTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_unsigned_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_float, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_MADD, ALTIVEC_BUILTIN_VMADDFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF }, + { ALTIVEC_BUILTIN_VEC_MADD, VSX_BUILTIN_XVMADDDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF }, + { ALTIVEC_BUILTIN_VEC_MADDS, ALTIVEC_BUILTIN_VMHADDSHS, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VEC_MLADD, ALTIVEC_BUILTIN_VMLADDUHM, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VEC_MLADD, ALTIVEC_BUILTIN_VMLADDUHM, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_MLADD, ALTIVEC_BUILTIN_VMLADDUHM, + RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VEC_MLADD, ALTIVEC_BUILTIN_VMLADDUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_MRADDS, ALTIVEC_BUILTIN_VMHRADDSHS, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI }, + { VSX_BUILTIN_VEC_MSUB, VSX_BUILTIN_XVMSUBSP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF }, + { 
VSX_BUILTIN_VEC_MSUB, VSX_BUILTIN_XVMSUBDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF }, + { ALTIVEC_BUILTIN_VEC_MSUM, ALTIVEC_BUILTIN_VMSUMUBM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_MSUM, ALTIVEC_BUILTIN_VMSUMMBM, + RS6000_BTI_V4SI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_MSUM, ALTIVEC_BUILTIN_VMSUMUHM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_MSUM, ALTIVEC_BUILTIN_VMSUMSHM, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_VMSUMSHM, ALTIVEC_BUILTIN_VMSUMSHM, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_VMSUMUHM, ALTIVEC_BUILTIN_VMSUMUHM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_VMSUMMBM, ALTIVEC_BUILTIN_VMSUMMBM, + RS6000_BTI_V4SI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_VMSUMUBM, ALTIVEC_BUILTIN_VMSUMUBM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_MSUMS, ALTIVEC_BUILTIN_VMSUMUHS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_MSUMS, ALTIVEC_BUILTIN_VMSUMSHS, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_VMSUMSHS, ALTIVEC_BUILTIN_VMSUMSHS, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_VMSUMUHS, ALTIVEC_BUILTIN_VMSUMUHS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI }, + { VSX_BUILTIN_VEC_NMADD, VSX_BUILTIN_XVNMADDSP, + RS6000_BTI_V4SF, 
RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF }, + { VSX_BUILTIN_VEC_NMADD, VSX_BUILTIN_XVNMADDDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF }, + { ALTIVEC_BUILTIN_VEC_NMSUB, ALTIVEC_BUILTIN_VNMSUBFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF }, + { ALTIVEC_BUILTIN_VEC_NMSUB, VSX_BUILTIN_XVNMSUBDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_4SI, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_8HI, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_8HI, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_16QI, + RS6000_BTI_V16QI, 
RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_16QI, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_16QI, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_bool_V2DI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_unsigned_V2DI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_unsigned_V2DI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 
RS6000_BTI_V4SF }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI, + RS6000_BTI_V16QI, 
RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_NOT_OPAQUE }, + { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_NOT_OPAQUE }, + { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_4SI, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_NOT_OPAQUE }, + { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_NOT_OPAQUE }, + { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_NOT_OPAQUE }, + { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_NOT_OPAQUE }, + { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_8HI, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_NOT_OPAQUE }, + { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_8HI, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, 
RS6000_BTI_NOT_OPAQUE }, + { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_NOT_OPAQUE }, + { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_NOT_OPAQUE }, + { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_16QI, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_NOT_OPAQUE }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V2DI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, RS6000_BTI_bool_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_bool_V2DI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, 
RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, 
RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX, + RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEBX, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEBX, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEBX, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEBX, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX, + RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX, + RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEWX, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEWX, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEWX, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEWX, + 
RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEWX, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI }, + { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float }, + { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI }, + { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI }, + { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_void }, + { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_void }, + { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_void }, + { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_void }, + { ALTIVEC_BUILTIN_VEC_STVEHX, 
ALTIVEC_BUILTIN_STVEHX, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_void }, + { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_void }, + { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_void }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI }, + { 
ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, 
~RS6000_BTI_INTQI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_double }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V2DI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL, + RS6000_BTI_void, RS6000_BTI_bool_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_bool_V2DI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_pixel_V8HI, 
RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + 
RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 
~RS6000_BTI_float }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, 
RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + 
{ ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { VSX_BUILTIN_VEC_XXSLDWI, VSX_BUILTIN_XXSLDWI_16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_NOT_OPAQUE }, + { VSX_BUILTIN_VEC_XXSLDWI, VSX_BUILTIN_XXSLDWI_16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_NOT_OPAQUE }, + { VSX_BUILTIN_VEC_XXSLDWI, VSX_BUILTIN_XXSLDWI_8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_NOT_OPAQUE }, + { VSX_BUILTIN_VEC_XXSLDWI, VSX_BUILTIN_XXSLDWI_8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_NOT_OPAQUE }, + { VSX_BUILTIN_VEC_XXSLDWI, VSX_BUILTIN_XXSLDWI_4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_NOT_OPAQUE }, + { VSX_BUILTIN_VEC_XXSLDWI, VSX_BUILTIN_XXSLDWI_4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_NOT_OPAQUE }, + { VSX_BUILTIN_VEC_XXSLDWI, VSX_BUILTIN_XXSLDWI_2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_NOT_OPAQUE }, + { VSX_BUILTIN_VEC_XXSLDWI, VSX_BUILTIN_XXSLDWI_2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_NOT_OPAQUE }, + { VSX_BUILTIN_VEC_XXSLDWI, VSX_BUILTIN_XXSLDWI_4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 
RS6000_BTI_NOT_OPAQUE }, + { VSX_BUILTIN_VEC_XXSLDWI, VSX_BUILTIN_XXSLDWI_2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_NOT_OPAQUE }, + { VSX_BUILTIN_VEC_XXPERMDI, VSX_BUILTIN_XXPERMDI_2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_NOT_OPAQUE }, + { VSX_BUILTIN_VEC_XXPERMDI, VSX_BUILTIN_XXPERMDI_2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_NOT_OPAQUE }, + { VSX_BUILTIN_VEC_XXPERMDI, VSX_BUILTIN_XXPERMDI_2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_NOT_OPAQUE }, + { VSX_BUILTIN_VEC_XXPERMDI, VSX_BUILTIN_XXPERMDI_4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_NOT_OPAQUE }, + { VSX_BUILTIN_VEC_XXPERMDI, VSX_BUILTIN_XXPERMDI_4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_NOT_OPAQUE }, + { VSX_BUILTIN_VEC_XXPERMDI, VSX_BUILTIN_XXPERMDI_4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_NOT_OPAQUE }, + { VSX_BUILTIN_VEC_XXPERMDI, VSX_BUILTIN_XXPERMDI_8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_NOT_OPAQUE }, + { VSX_BUILTIN_VEC_XXPERMDI, VSX_BUILTIN_XXPERMDI_8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_NOT_OPAQUE }, + { VSX_BUILTIN_VEC_XXPERMDI, VSX_BUILTIN_XXPERMDI_16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_NOT_OPAQUE }, + { VSX_BUILTIN_VEC_XXPERMDI, VSX_BUILTIN_XXPERMDI_16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_NOT_OPAQUE }, + + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVD2X_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVD2X_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVD2X_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V2DI, 0 }, + { VSX_BUILTIN_VEC_LD, 
VSX_BUILTIN_LXVD2X_V2DI, + RS6000_BTI_bool_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V2DI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V4SI, + RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V4SI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_long, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V8HI, + RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V8HI, + RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V8HI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V16QI, + RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 }, + { VSX_BUILTIN_VEC_LD, 
VSX_BUILTIN_LXVW4X_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V16QI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 }, + + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVD2X_V2DF, + RS6000_BTI_void, RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVD2X_V2DI, + RS6000_BTI_void, RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVD2X_V2DI, + RS6000_BTI_void, RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V2DI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVD2X_V2DI, + RS6000_BTI_void, RS6000_BTI_bool_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_bool_V2DI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V4SF, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V4SF, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V4SI, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V4SI, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V4SI, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V4SI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V4SI, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, + ~RS6000_BTI_UINTSI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V4SI, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, + ~RS6000_BTI_bool_V4SI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V4SI, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, 
RS6000_BTI_INTSI, + ~RS6000_BTI_UINTSI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V4SI, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, + ~RS6000_BTI_INTSI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V8HI, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V8HI, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V8HI, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V8HI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V8HI, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, + ~RS6000_BTI_UINTHI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V8HI, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, + ~RS6000_BTI_bool_V8HI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V8HI, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, + ~RS6000_BTI_UINTHI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V8HI, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, + ~RS6000_BTI_INTHI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V16QI, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V16QI, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V16QI, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V16QI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V16QI, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, + ~RS6000_BTI_UINTQI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V16QI, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, + ~RS6000_BTI_bool_V16QI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V16QI, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, + ~RS6000_BTI_UINTQI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V16QI, + RS6000_BTI_void, 
RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, + ~RS6000_BTI_INTQI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V16QI, + RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, + ~RS6000_BTI_pixel_V8HI }, + + /* Predicates. */ + { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI }, + { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_V16QI }, + { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUW_P, + 
RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTFP_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SF, RS6000_BTI_V4SF }, + { ALTIVEC_BUILTIN_VCMPGT_P, VSX_BUILTIN_XVCMPGTDP_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V2DF, RS6000_BTI_V2DF }, + + + { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI }, + { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_V16QI }, + { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P, + RS6000_BTI_INTSI, 
RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI }, + { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P, + RS6000_BTI_INTSI, 
RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQFP_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SF, RS6000_BTI_V4SF }, + { ALTIVEC_BUILTIN_VCMPEQ_P, VSX_BUILTIN_XVCMPEQDP_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V2DF, RS6000_BTI_V2DF }, + + + /* cmpge is the same as cmpgt for all cases except floating point. + There is further code to deal with this special case in + altivec_build_resolved_builtin. */ + { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI }, + { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_V16QI }, + { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSH_P, + 
RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGEFP_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SF, RS6000_BTI_V4SF }, + { ALTIVEC_BUILTIN_VCMPGE_P, VSX_BUILTIN_XVCMPGEDP_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V2DF, RS6000_BTI_V2DF }, + + { (enum rs6000_builtins) 0, (enum rs6000_builtins) 0, 0, 0, 0, 0 } +}; + + +/* Convert a type stored into a struct altivec_builtin_types as ID, + into a tree. The types are in rs6000_builtin_types: negative values + create a pointer type for the type associated to ~ID. Note it is + a logical NOT, rather than a negation, otherwise you cannot represent + a pointer type for ID 0. */ + +static inline tree +rs6000_builtin_type (int id) +{ + tree t; + t = rs6000_builtin_types[id < 0 ? ~id : id]; + return id < 0 ? 
build_pointer_type (t) : t; +} + +/* Check whether the type of an argument, T, is compatible with a + type ID stored into a struct altivec_builtin_types. Integer + types are considered compatible; otherwise, the language hook + lang_hooks.types_compatible_p makes the decision. */ + +static inline bool +rs6000_builtin_type_compatible (tree t, int id) +{ + tree builtin_type; + builtin_type = rs6000_builtin_type (id); + if (t == error_mark_node) + return false; + if (INTEGRAL_TYPE_P (t) && INTEGRAL_TYPE_P (builtin_type)) + return true; + else + return lang_hooks.types_compatible_p (t, builtin_type); +} + + +/* Build a tree for a function call to an Altivec non-overloaded builtin. + The overloaded builtin that matched the types and args is described + by DESC. The N arguments are given in ARGS, respectively. + + Actually the only thing it does is calling fold_convert on ARGS, with + a small exception for vec_{all,any}_{ge,le} predicates. */ + +static tree +altivec_build_resolved_builtin (tree *args, int n, + const struct altivec_builtin_types *desc) +{ + tree impl_fndecl = rs6000_builtin_decls[desc->overloaded_code]; + tree ret_type = rs6000_builtin_type (desc->ret_type); + tree argtypes = TYPE_ARG_TYPES (TREE_TYPE (impl_fndecl)); + tree arg_type[3]; + tree call; + + int i; + for (i = 0; i < n; i++) + arg_type[i] = TREE_VALUE (argtypes), argtypes = TREE_CHAIN (argtypes); + + /* The AltiVec overloading implementation is overall gross, but this + is particularly disgusting. The vec_{all,any}_{ge,le} builtins + are completely different for floating-point vs. integer vector + types, because the former has vcmpgefp, but the latter should use + vcmpgtXX. + + In practice, the second and third arguments are swapped, and the + condition (LT vs. EQ, which is recognizable by bit 1 of the first + argument) is reversed. Patch the arguments here before building + the resolved CALL_EXPR. 
*/ + if (desc->code == ALTIVEC_BUILTIN_VCMPGE_P + && desc->overloaded_code != ALTIVEC_BUILTIN_VCMPGEFP_P) + { + tree t; + t = args[2], args[2] = args[1], args[1] = t; + t = arg_type[2], arg_type[2] = arg_type[1], arg_type[1] = t; + + args[0] = fold_build2 (BIT_XOR_EXPR, TREE_TYPE (args[0]), args[0], + build_int_cst (NULL_TREE, 2)); + } + + switch (n) + { + case 0: + call = build_call_expr (impl_fndecl, 0); + break; + case 1: + call = build_call_expr (impl_fndecl, 1, + fold_convert (arg_type[0], args[0])); + break; + case 2: + call = build_call_expr (impl_fndecl, 2, + fold_convert (arg_type[0], args[0]), + fold_convert (arg_type[1], args[1])); + break; + case 3: + call = build_call_expr (impl_fndecl, 3, + fold_convert (arg_type[0], args[0]), + fold_convert (arg_type[1], args[1]), + fold_convert (arg_type[2], args[2])); + break; + default: + gcc_unreachable (); + } + return fold_convert (ret_type, call); +} + +/* Implementation of the resolve_overloaded_builtin target hook, to + support Altivec's overloaded builtins. */ + +tree +altivec_resolve_overloaded_builtin (location_t loc, tree fndecl, + void *passed_arglist) +{ + VEC(tree,gc) *arglist = (VEC(tree,gc) *) passed_arglist; + unsigned int nargs = VEC_length (tree, arglist); + unsigned int fcode = DECL_FUNCTION_CODE (fndecl); + tree fnargs = TYPE_ARG_TYPES (TREE_TYPE (fndecl)); + tree types[3], args[3]; + const struct altivec_builtin_types *desc; + unsigned int n; + + if ((fcode < ALTIVEC_BUILTIN_OVERLOADED_FIRST + || fcode > ALTIVEC_BUILTIN_OVERLOADED_LAST) + && (fcode < VSX_BUILTIN_OVERLOADED_FIRST + || fcode > VSX_BUILTIN_OVERLOADED_LAST)) + return NULL_TREE; + + /* For now treat vec_splats and vec_promote as the same. */ + if (fcode == ALTIVEC_BUILTIN_VEC_SPLATS + || fcode == ALTIVEC_BUILTIN_VEC_PROMOTE) + { + tree type, arg; + int size; + int i; + bool unsigned_p; + VEC(constructor_elt,gc) *vec; + const char *name = fcode == ALTIVEC_BUILTIN_VEC_SPLATS ? 
"vec_splats": "vec_promote"; + + if (nargs == 0) + { + error ("%s only accepts %d arguments", name, (fcode == ALTIVEC_BUILTIN_VEC_PROMOTE)+1 ); + return error_mark_node; + } + if (fcode == ALTIVEC_BUILTIN_VEC_SPLATS && nargs != 1) + { + error ("%s only accepts 1 argument", name); + return error_mark_node; + } + if (fcode == ALTIVEC_BUILTIN_VEC_PROMOTE && nargs != 2) + { + error ("%s only accepts 2 arguments", name); + return error_mark_node; + } + /* Ignore promote's element argument. */ + if (fcode == ALTIVEC_BUILTIN_VEC_PROMOTE + && !INTEGRAL_TYPE_P (TREE_TYPE (VEC_index (tree, arglist, 1)))) + goto bad; + + arg = VEC_index (tree, arglist, 0); + type = TREE_TYPE (arg); + if (!SCALAR_FLOAT_TYPE_P (type) + && !INTEGRAL_TYPE_P (type)) + goto bad; + unsigned_p = TYPE_UNSIGNED (type); + switch (TYPE_MODE (type)) + { + case DImode: + type = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node); + size = 2; + break; + case SImode: + type = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node); + size = 4; + break; + case HImode: + type = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node); + size = 8; + break; + case QImode: + type = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node); + size = 16; + break; + case SFmode: type = V4SF_type_node; size = 4; break; + case DFmode: type = V2DF_type_node; size = 2; break; + default: + goto bad; + } + arg = save_expr (fold_convert (TREE_TYPE (type), arg)); + vec = VEC_alloc (constructor_elt, gc, size); + for(i = 0; i < size; i++) + { + constructor_elt *elt; + + elt = VEC_quick_push (constructor_elt, vec, NULL); + elt->index = NULL_TREE; + elt->value = arg; + } + return build_constructor (type, vec); + } + + /* For now use pointer tricks to do the extraction, unless we are on VSX + extracting a double from a constant offset. 
*/ + if (fcode == ALTIVEC_BUILTIN_VEC_EXTRACT) + { + tree arg1; + tree arg1_type; + tree arg2; + tree arg1_inner_type; + tree decl, stmt; + tree innerptrtype; + enum machine_mode mode; + + /* No second argument. */ + if (nargs != 2) + { + error ("vec_extract only accepts 2 arguments"); + return error_mark_node; + } + + arg2 = VEC_index (tree, arglist, 1); + arg1 = VEC_index (tree, arglist, 0); + arg1_type = TREE_TYPE (arg1); + + if (TREE_CODE (arg1_type) != VECTOR_TYPE) + goto bad; + if (!INTEGRAL_TYPE_P (TREE_TYPE (arg2))) + goto bad; + + /* If we can use the VSX xxpermdi instruction, use that for extract. */ + mode = TYPE_MODE (arg1_type); + if ((mode == V2DFmode || mode == V2DImode) && VECTOR_MEM_VSX_P (mode) + && TREE_CODE (arg2) == INTEGER_CST + && TREE_INT_CST_HIGH (arg2) == 0 + && (TREE_INT_CST_LOW (arg2) == 0 || TREE_INT_CST_LOW (arg2) == 1)) + { + tree call = NULL_TREE; + + if (mode == V2DFmode) + call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V2DF]; + else if (mode == V2DImode) + call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V2DI]; + + if (call) + return build_call_expr (call, 2, arg1, arg2); + } + + /* Build *(((arg1_inner_type*)&(vector type){arg1})+arg2). 
*/ + arg1_inner_type = TREE_TYPE (arg1_type); + arg2 = build_binary_op (loc, BIT_AND_EXPR, arg2, + build_int_cst (TREE_TYPE (arg2), + TYPE_VECTOR_SUBPARTS (arg1_type) + - 1), 0); + decl = build_decl (loc, VAR_DECL, NULL_TREE, arg1_type); + DECL_EXTERNAL (decl) = 0; + TREE_PUBLIC (decl) = 0; + DECL_CONTEXT (decl) = current_function_decl; + TREE_USED (decl) = 1; + TREE_TYPE (decl) = arg1_type; + TREE_READONLY (decl) = TYPE_READONLY (arg1_type); + DECL_INITIAL (decl) = arg1; + stmt = build1 (DECL_EXPR, arg1_type, decl); + TREE_ADDRESSABLE (decl) = 1; + SET_EXPR_LOCATION (stmt, loc); + stmt = build1 (COMPOUND_LITERAL_EXPR, arg1_type, stmt); + + innerptrtype = build_pointer_type (arg1_inner_type); + + stmt = build_unary_op (loc, ADDR_EXPR, stmt, 0); + stmt = convert (innerptrtype, stmt); + stmt = build_binary_op (loc, PLUS_EXPR, stmt, arg2, 1); + stmt = build_indirect_ref (loc, stmt, RO_NULL); + + return stmt; + } + + /* For now use pointer tricks to do the insertion, unless we are on VSX + inserting a double to a constant offset. */ + if (fcode == ALTIVEC_BUILTIN_VEC_INSERT) + { + tree arg0; + tree arg1; + tree arg2; + tree arg1_type; + tree arg1_inner_type; + tree decl, stmt; + tree innerptrtype; + enum machine_mode mode; + + /* No second or third arguments. */ + if (nargs != 3) + { + error ("vec_insert only accepts 3 arguments"); + return error_mark_node; + } + + arg0 = VEC_index (tree, arglist, 0); + arg1 = VEC_index (tree, arglist, 1); + arg1_type = TREE_TYPE (arg1); + arg2 = VEC_index (tree, arglist, 2); + + if (TREE_CODE (arg1_type) != VECTOR_TYPE) + goto bad; + if (!INTEGRAL_TYPE_P (TREE_TYPE (arg2))) + goto bad; + + /* If we can use the VSX xxpermdi instruction, use that for insert. 
*/ + mode = TYPE_MODE (arg1_type); + if ((mode == V2DFmode || mode == V2DImode) && VECTOR_UNIT_VSX_P (mode) + && TREE_CODE (arg2) == INTEGER_CST + && TREE_INT_CST_HIGH (arg2) == 0 + && (TREE_INT_CST_LOW (arg2) == 0 || TREE_INT_CST_LOW (arg2) == 1)) + { + tree call = NULL_TREE; + + if (mode == V2DFmode) + call = rs6000_builtin_decls[VSX_BUILTIN_VEC_SET_V2DF]; + else if (mode == V2DImode) + call = rs6000_builtin_decls[VSX_BUILTIN_VEC_SET_V2DI]; + + /* Note, __builtin_vec_insert_ has vector and scalar types + reversed. */ + if (call) + return build_call_expr (call, 3, arg1, arg0, arg2); + } + + /* Build *(((arg1_inner_type*)&(vector type){arg1})+arg2) = arg0. */ + arg1_inner_type = TREE_TYPE (arg1_type); + arg2 = build_binary_op (loc, BIT_AND_EXPR, arg2, + build_int_cst (TREE_TYPE (arg2), + TYPE_VECTOR_SUBPARTS (arg1_type) + - 1), 0); + decl = build_decl (loc, VAR_DECL, NULL_TREE, arg1_type); + DECL_EXTERNAL (decl) = 0; + TREE_PUBLIC (decl) = 0; + DECL_CONTEXT (decl) = current_function_decl; + TREE_USED (decl) = 1; + TREE_TYPE (decl) = arg1_type; + TREE_READONLY (decl) = TYPE_READONLY (arg1_type); + DECL_INITIAL (decl) = arg1; + stmt = build1 (DECL_EXPR, arg1_type, decl); + TREE_ADDRESSABLE (decl) = 1; + SET_EXPR_LOCATION (stmt, loc); + stmt = build1 (COMPOUND_LITERAL_EXPR, arg1_type, stmt); + + innerptrtype = build_pointer_type (arg1_inner_type); + + stmt = build_unary_op (loc, ADDR_EXPR, stmt, 0); + stmt = convert (innerptrtype, stmt); + stmt = build_binary_op (loc, PLUS_EXPR, stmt, arg2, 1); + stmt = build_indirect_ref (loc, stmt, RO_NULL); + stmt = build2 (MODIFY_EXPR, TREE_TYPE (stmt), stmt, + convert (TREE_TYPE (stmt), arg0)); + stmt = build2 (COMPOUND_EXPR, arg1_type, stmt, decl); + return stmt; + } + + for (n = 0; + !VOID_TYPE_P (TREE_VALUE (fnargs)) && n < nargs; + fnargs = TREE_CHAIN (fnargs), n++) + { + tree decl_type = TREE_VALUE (fnargs); + tree arg = VEC_index (tree, arglist, n); + tree type; + + if (arg == error_mark_node) + return error_mark_node; + + 
if (n >= 3) + abort (); + + arg = default_conversion (arg); + + /* The C++ front-end converts float * to const void * using + NOP_EXPR (NOP_EXPR (x)). */ + type = TREE_TYPE (arg); + if (POINTER_TYPE_P (type) + && TREE_CODE (arg) == NOP_EXPR + && lang_hooks.types_compatible_p (TREE_TYPE (arg), + const_ptr_type_node) + && lang_hooks.types_compatible_p (TREE_TYPE (TREE_OPERAND (arg, 0)), + ptr_type_node)) + { + arg = TREE_OPERAND (arg, 0); + type = TREE_TYPE (arg); + } + + /* Remove the const from the pointers to simplify the overload + matching further down. */ + if (POINTER_TYPE_P (decl_type) + && POINTER_TYPE_P (type) + && TYPE_QUALS (TREE_TYPE (type)) != 0) + { + if (TYPE_READONLY (TREE_TYPE (type)) + && !TYPE_READONLY (TREE_TYPE (decl_type))) + warning (0, "passing arg %d of %qE discards qualifiers from " + "pointer target type", n + 1, fndecl); + type = build_pointer_type (build_qualified_type (TREE_TYPE (type), + 0)); + arg = fold_convert (type, arg); + } + + args[n] = arg; + types[n] = type; + } + + /* If the number of arguments did not match the prototype, return NULL + and the generic code will issue the appropriate error message. */ + if (!VOID_TYPE_P (TREE_VALUE (fnargs)) || n < nargs) + return NULL; + + if (n == 0) + abort (); + + if (fcode == ALTIVEC_BUILTIN_VEC_STEP) + { + if (TREE_CODE (types[0]) != VECTOR_TYPE) + goto bad; + + return build_int_cst (NULL_TREE, TYPE_VECTOR_SUBPARTS (types[0])); + } + + for (desc = altivec_overloaded_builtins; + desc->code && desc->code != fcode; desc++) + continue; + + /* For arguments after the last, we have RS6000_BTI_NOT_OPAQUE in + the opX fields. 
*/ + for (; desc->code == fcode; desc++) + if ((desc->op1 == RS6000_BTI_NOT_OPAQUE + || rs6000_builtin_type_compatible (types[0], desc->op1)) + && (desc->op2 == RS6000_BTI_NOT_OPAQUE + || rs6000_builtin_type_compatible (types[1], desc->op2)) + && (desc->op3 == RS6000_BTI_NOT_OPAQUE + || rs6000_builtin_type_compatible (types[2], desc->op3))) + return altivec_build_resolved_builtin (args, n, desc); + + bad: + error ("invalid parameter combination for AltiVec intrinsic"); + return error_mark_node; +} diff --git a/gcc/config/rs6000/rs6000-modes.def b/gcc/config/rs6000/rs6000-modes.def new file mode 100644 index 000000000..724c947f1 --- /dev/null +++ b/gcc/config/rs6000/rs6000-modes.def @@ -0,0 +1,41 @@ +/* Definitions of target machine for GNU compiler, for IBM RS/6000. + Copyright (C) 2002, 2003, 2004, 2007, 2010 Free Software Foundation, Inc. + Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu) + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +/* 128-bit floating point. ABI_V4 uses IEEE quad, AIX/Darwin + adjust this in rs6000_option_override_internal. */ +FLOAT_MODE (TF, 16, ieee_quad_format); + +/* Add any extra modes needed to represent the condition code. + + For the RS/6000, we need separate modes when unsigned (logical) comparisons + are being done and we need a separate mode for floating-point. 
We also + use a mode for the case when we are comparing the results of two + comparisons, as then only the EQ bit is valid in the register. */ + +CC_MODE (CCUNS); +CC_MODE (CCFP); +CC_MODE (CCEQ); + +/* Vector modes. */ +VECTOR_MODES (INT, 8); /* V8QI V4HI V2SI */ +VECTOR_MODES (INT, 16); /* V16QI V8HI V4SI V2DI */ +VECTOR_MODE (INT, DI, 1); +VECTOR_MODES (FLOAT, 8); /* V4HF V2SF */ +VECTOR_MODES (FLOAT, 16); /* V8HF V4SF V2DF */ diff --git a/gcc/config/rs6000/rs6000-opts.h b/gcc/config/rs6000/rs6000-opts.h new file mode 100644 index 000000000..542fea790 --- /dev/null +++ b/gcc/config/rs6000/rs6000-opts.h @@ -0,0 +1,144 @@ +/* Definitions of target machine needed for option handling for GNU compiler, + for IBM RS/6000. + Copyright (C) 2010 + Free Software Foundation, Inc. + Contributed by Michael Meissner (meissner@linux.vnet.ibm.com) + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef RS6000_OPTS_H +#define RS6000_OPTS_H + +/* Processor type. Order must match cpu attribute in MD file. 
*/ +enum processor_type + { + PROCESSOR_RIOS1, + PROCESSOR_RIOS2, + PROCESSOR_RS64A, + PROCESSOR_MPCCORE, + PROCESSOR_PPC403, + PROCESSOR_PPC405, + PROCESSOR_PPC440, + PROCESSOR_PPC476, + PROCESSOR_PPC601, + PROCESSOR_PPC603, + PROCESSOR_PPC604, + PROCESSOR_PPC604e, + PROCESSOR_PPC620, + PROCESSOR_PPC630, + PROCESSOR_PPC750, + PROCESSOR_PPC7400, + PROCESSOR_PPC7450, + PROCESSOR_PPC8540, + PROCESSOR_PPCE300C2, + PROCESSOR_PPCE300C3, + PROCESSOR_PPCE500MC, + PROCESSOR_PPCE500MC64, + PROCESSOR_POWER4, + PROCESSOR_POWER5, + PROCESSOR_POWER6, + PROCESSOR_POWER7, + PROCESSOR_CELL, + PROCESSOR_PPCA2, + PROCESSOR_TITAN +}; + +/* FP processor type. */ +enum fpu_type_t +{ + FPU_NONE, /* No FPU */ + FPU_SF_LITE, /* Limited Single Precision FPU */ + FPU_DF_LITE, /* Limited Double Precision FPU */ + FPU_SF_FULL, /* Full Single Precision FPU */ + FPU_DF_FULL /* Full Double Single Precision FPU */ +}; + +/* Types of costly dependences. */ +enum rs6000_dependence_cost +{ + max_dep_latency = 1000, + no_dep_costly, + all_deps_costly, + true_store_to_load_dep_costly, + store_to_load_dep_costly +}; + +/* Types of nop insertion schemes in sched target hook sched_finish. */ +enum rs6000_nop_insertion +{ + sched_finish_regroup_exact = 1000, + sched_finish_pad_groups, + sched_finish_none +}; + +/* Dispatch group termination caused by an insn. */ +enum group_termination +{ + current_group, + previous_group +}; + +/* Enumeration to give which calling sequence to use. */ +enum rs6000_abi { + ABI_NONE, + ABI_AIX, /* IBM's AIX */ + ABI_V4, /* System V.4/eabi */ + ABI_DARWIN /* Apple's Darwin (OS X kernel) */ +}; + +/* Small data support types. */ +enum rs6000_sdata_type { + SDATA_NONE, /* No small data support. */ + SDATA_DATA, /* Just put data in .sbss/.sdata, don't use relocs. */ + SDATA_SYSV, /* Use r13 to point to .sdata/.sbss. */ + SDATA_EABI /* Use r13 like above, r2 points to .sdata2/.sbss2. */ +}; + +/* Type of traceback to use. 
*/ +enum rs6000_traceback_type { + traceback_default = 0, + traceback_none, + traceback_part, + traceback_full +}; + +/* Code model for 64-bit linux. + small: 16-bit toc offsets. + medium: 32-bit toc offsets, static data and code within 2G of TOC pointer. + large: 32-bit toc offsets, no limit on static data and code. */ +enum rs6000_cmodel { + CMODEL_SMALL, + CMODEL_MEDIUM, + CMODEL_LARGE +}; + +/* Describe which vector unit to use for a given machine mode. */ +enum rs6000_vector { + VECTOR_NONE, /* Type is not a vector or not supported */ + VECTOR_ALTIVEC, /* Use altivec for vector processing */ + VECTOR_VSX, /* Use VSX for vector processing */ + VECTOR_PAIRED, /* Use paired floating point for vectors */ + VECTOR_SPE, /* Use SPE for vector processing */ + VECTOR_OTHER /* Some other vector unit */ +}; + +#endif diff --git a/gcc/config/rs6000/rs6000-protos.h b/gcc/config/rs6000/rs6000-protos.h new file mode 100644 index 000000000..d79af36ce --- /dev/null +++ b/gcc/config/rs6000/rs6000-protos.h @@ -0,0 +1,198 @@ +/* Definitions of target machine for GNU compiler, for IBM RS/6000. + Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, + 2010, 2011 + Free Software Foundation, Inc. + Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu) + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . 
*/ + +#ifndef GCC_RS6000_PROTOS_H +#define GCC_RS6000_PROTOS_H + +/* Declare functions in rs6000.c */ + +#ifdef RTX_CODE + +#ifdef TREE_CODE +extern void init_cumulative_args (CUMULATIVE_ARGS *, tree, rtx, int, int, int, + tree, enum machine_mode); +#endif /* TREE_CODE */ + +extern bool easy_altivec_constant (rtx, enum machine_mode); +extern HOST_WIDE_INT const_vector_elt_as_int (rtx, unsigned int); +extern bool macho_lo_sum_memory_operand (rtx, enum machine_mode); +extern int num_insns_constant (rtx, enum machine_mode); +extern int num_insns_constant_wide (HOST_WIDE_INT); +extern int small_data_operand (rtx, enum machine_mode); +extern bool toc_relative_expr_p (rtx); +extern bool invalid_e500_subreg (rtx, enum machine_mode); +extern void validate_condition_mode (enum rtx_code, enum machine_mode); +extern bool legitimate_constant_pool_address_p (const_rtx, enum machine_mode, + bool); +extern bool legitimate_indirect_address_p (rtx, int); +extern bool legitimate_indexed_address_p (rtx, int); +extern bool avoiding_indexed_address_p (enum machine_mode); + +extern rtx rs6000_got_register (rtx); +extern rtx find_addr_reg (rtx); +extern rtx gen_easy_altivec_constant (rtx); +extern const char *output_vec_const_move (rtx *); +extern void rs6000_expand_vector_init (rtx, rtx); +extern void paired_expand_vector_init (rtx, rtx); +extern void rs6000_expand_vector_set (rtx, rtx, int); +extern void rs6000_expand_vector_extract (rtx, rtx, int); +extern void build_mask64_2_operands (rtx, rtx *); +extern int expand_block_clear (rtx[]); +extern int expand_block_move (rtx[]); +extern const char * rs6000_output_load_multiple (rtx[]); +extern int includes_lshift_p (rtx, rtx); +extern int includes_rshift_p (rtx, rtx); +extern int includes_rldic_lshift_p (rtx, rtx); +extern int includes_rldicr_lshift_p (rtx, rtx); +extern int insvdi_rshift_rlwimi_p (rtx, rtx, rtx); +extern int registers_ok_for_quad_peep (rtx, rtx); +extern int mems_ok_for_quad_peep (rtx, rtx); +extern bool gpr_or_gpr_p 
(rtx, rtx); +extern enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, + enum reg_class); +extern enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class, + enum machine_mode, + rtx); +extern bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, + enum reg_class, + enum machine_mode); +extern bool (*rs6000_cannot_change_mode_class_ptr) (enum machine_mode, + enum machine_mode, + enum reg_class); +extern void rs6000_secondary_reload_inner (rtx, rtx, rtx, bool); +extern int paired_emit_vector_cond_expr (rtx, rtx, rtx, + rtx, rtx, rtx); +extern void paired_expand_vector_move (rtx operands[]); + + +extern int ccr_bit (rtx, int); +extern int extract_MB (rtx); +extern int extract_ME (rtx); +extern void rs6000_output_function_entry (FILE *, const char *); +extern void print_operand (FILE *, rtx, int); +extern void print_operand_address (FILE *, rtx); +extern enum rtx_code rs6000_reverse_condition (enum machine_mode, + enum rtx_code); +extern void rs6000_emit_sISEL (enum machine_mode, rtx[]); +extern void rs6000_emit_sCOND (enum machine_mode, rtx[]); +extern void rs6000_emit_cbranch (enum machine_mode, rtx[]); +extern char * output_cbranch (rtx, const char *, int, rtx); +extern char * output_e500_flip_gt_bit (rtx, rtx); +extern const char * output_probe_stack_range (rtx, rtx); +extern rtx rs6000_emit_set_const (rtx, enum machine_mode, rtx, int); +extern int rs6000_emit_cmove (rtx, rtx, rtx, rtx); +extern int rs6000_emit_vector_cond_expr (rtx, rtx, rtx, rtx, rtx, rtx); +extern void rs6000_emit_minmax (rtx, enum rtx_code, rtx, rtx); +extern void rs6000_emit_sync (enum rtx_code, enum machine_mode, + rtx, rtx, rtx, rtx, bool); +extern void rs6000_split_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx, rtx); +extern void rs6000_split_compare_and_swap (rtx, rtx, rtx, rtx, rtx); +extern void rs6000_expand_compare_and_swapqhi (rtx, rtx, rtx, rtx); +extern void rs6000_split_compare_and_swapqhi (rtx, rtx, rtx, rtx, rtx, rtx); +extern void 
rs6000_split_lock_test_and_set (rtx, rtx, rtx, rtx); +extern void rs6000_emit_swdiv (rtx, rtx, rtx, bool); +extern void rs6000_emit_swrsqrt (rtx, rtx); +extern void output_toc (FILE *, rtx, int, enum machine_mode); +extern rtx rs6000_longcall_ref (rtx); +extern void rs6000_fatal_bad_address (rtx); +extern rtx create_TOC_reference (rtx, rtx); +extern void rs6000_split_multireg_move (rtx, rtx); +extern void rs6000_emit_move (rtx, rtx, enum machine_mode); +extern rtx rs6000_secondary_memory_needed_rtx (enum machine_mode); +extern rtx (*rs6000_legitimize_reload_address_ptr) (rtx, enum machine_mode, + int, int, int, int *); +extern bool rs6000_legitimate_offset_address_p (enum machine_mode, rtx, int); +extern rtx rs6000_find_base_term (rtx); +extern bool rs6000_offsettable_memref_p (rtx); +extern rtx rs6000_return_addr (int, rtx); +extern void rs6000_output_symbol_ref (FILE*, rtx); +extern HOST_WIDE_INT rs6000_initial_elimination_offset (int, int); +extern void rs6000_emit_popcount (rtx, rtx); +extern void rs6000_emit_parity (rtx, rtx); + +extern rtx rs6000_machopic_legitimize_pic_address (rtx, enum machine_mode, + rtx); +extern rtx rs6000_address_for_fpconvert (rtx); +extern rtx rs6000_address_for_altivec (rtx); +extern rtx rs6000_allocate_stack_temp (enum machine_mode, bool, bool); +extern int rs6000_loop_align (rtx); +#endif /* RTX_CODE */ + +#ifdef TREE_CODE +extern unsigned int rs6000_special_round_type_align (tree, unsigned int, + unsigned int); +extern unsigned int darwin_rs6000_special_round_type_align (tree, unsigned int, + unsigned int); +extern tree altivec_resolve_overloaded_builtin (location_t, tree, void *); +extern rtx rs6000_libcall_value (enum machine_mode); +extern rtx rs6000_va_arg (tree, tree); +extern int function_ok_for_sibcall (tree); +extern void rs6000_elf_declare_function_name (FILE *, const char *, tree); +extern bool rs6000_elf_in_small_data_p (const_tree); +#ifdef ARGS_SIZE_RTX +/* expr.h defines ARGS_SIZE_RTX and `enum direction' */ +extern 
enum direction function_arg_padding (enum machine_mode, const_tree); +#endif /* ARGS_SIZE_RTX */ + +#endif /* TREE_CODE */ + +extern int direct_return (void); +extern int first_reg_to_save (void); +extern int first_fp_reg_to_save (void); +extern void output_ascii (FILE *, const char *, int); +extern void rs6000_gen_section_name (char **, const char *, const char *); +extern void output_function_profiler (FILE *, int); +extern void output_profile_hook (int); +extern int rs6000_trampoline_size (void); +extern alias_set_type get_TOC_alias_set (void); +extern void rs6000_emit_prologue (void); +extern void rs6000_emit_load_toc_table (int); +extern unsigned int rs6000_dbx_register_number (unsigned int); +extern void rs6000_emit_epilogue (int); +extern void rs6000_emit_eh_reg_restore (rtx, rtx); +extern const char * output_isel (rtx *); +extern bool rs6000_tls_referenced_p (rtx); + +extern void rs6000_aix_asm_output_dwarf_table_ref (char *); + +/* Declare functions in rs6000-c.c */ + +extern void rs6000_pragma_longcall (struct cpp_reader *); +extern void rs6000_cpu_cpp_builtins (struct cpp_reader *); +#ifdef TREE_CODE +extern bool rs6000_pragma_target_parse (tree, tree); +#endif + +#if TARGET_MACHO +char *output_call (rtx, rtx *, int, int); +#endif + +#ifdef NO_DOLLAR_IN_LABEL +const char * rs6000_xcoff_strip_dollar (const char *); +#endif + +void rs6000_final_prescan_insn (rtx, rtx *operand, int num_operands); + +extern bool rs6000_hard_regno_mode_ok_p[][FIRST_PSEUDO_REGISTER]; +extern unsigned char rs6000_class_max_nregs[][LIM_REG_CLASSES]; +extern unsigned char rs6000_hard_regno_nregs[][FIRST_PSEUDO_REGISTER]; +#endif /* rs6000-protos.h */ diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c new file mode 100644 index 000000000..730e23445 --- /dev/null +++ b/gcc/config/rs6000/rs6000.c @@ -0,0 +1,28250 @@ +/* Subroutines used for code generation on IBM RS/6000. 
+ Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, + 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2013 + Free Software Foundation, Inc. + Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu) + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include "tm.h" +#include "rtl.h" +#include "regs.h" +#include "hard-reg-set.h" +#include "insn-config.h" +#include "conditions.h" +#include "insn-attr.h" +#include "flags.h" +#include "recog.h" +#include "obstack.h" +#include "tree.h" +#include "expr.h" +#include "optabs.h" +#include "except.h" +#include "function.h" +#include "output.h" +#include "basic-block.h" +#include "integrate.h" +#include "diagnostic-core.h" +#include "toplev.h" +#include "ggc.h" +#include "hashtab.h" +#include "tm_p.h" +#include "target.h" +#include "target-def.h" +#include "langhooks.h" +#include "reload.h" +#include "cfglayout.h" +#include "cfgloop.h" +#include "sched-int.h" +#include "gimple.h" +#include "tree-flow.h" +#include "intl.h" +#include "params.h" +#include "tm-constrs.h" +#if TARGET_XCOFF +#include "xcoffout.h" /* get declarations of xcoff_*_section_name */ +#endif +#if TARGET_MACHO +#include "gstab.h" /* for N_SLINE */ +#endif + +#ifndef TARGET_NO_PROTOTYPE +#define TARGET_NO_PROTOTYPE 0 +#endif + +#define min(A,B) ((A) < (B) ? 
(A) : (B)) +#define max(A,B) ((A) > (B) ? (A) : (B)) + +/* Structure used to define the rs6000 stack */ +typedef struct rs6000_stack { + int reload_completed; /* stack info won't change from here on */ + int first_gp_reg_save; /* first callee saved GP register used */ + int first_fp_reg_save; /* first callee saved FP register used */ + int first_altivec_reg_save; /* first callee saved AltiVec register used */ + int lr_save_p; /* true if the link reg needs to be saved */ + int cr_save_p; /* true if the CR reg needs to be saved */ + unsigned int vrsave_mask; /* mask of vec registers to save */ + int push_p; /* true if we need to allocate stack space */ + int calls_p; /* true if the function makes any calls */ + int world_save_p; /* true if we're saving *everything*: + r13-r31, cr, f14-f31, vrsave, v20-v31 */ + enum rs6000_abi abi; /* which ABI to use */ + int gp_save_offset; /* offset to save GP regs from initial SP */ + int fp_save_offset; /* offset to save FP regs from initial SP */ + int altivec_save_offset; /* offset to save AltiVec regs from initial SP */ + int lr_save_offset; /* offset to save LR from initial SP */ + int cr_save_offset; /* offset to save CR from initial SP */ + int vrsave_save_offset; /* offset to save VRSAVE from initial SP */ + int spe_gp_save_offset; /* offset to save spe 64-bit gprs */ + int varargs_save_offset; /* offset to save the varargs registers */ + int ehrd_offset; /* offset to EH return data */ + int reg_size; /* register size (4 or 8) */ + HOST_WIDE_INT vars_size; /* variable save area size */ + int parm_size; /* outgoing parameter size */ + int save_size; /* save area size */ + int fixed_size; /* fixed size of stack frame */ + int gp_size; /* size of saved GP registers */ + int fp_size; /* size of saved FP registers */ + int altivec_size; /* size of saved AltiVec registers */ + int cr_size; /* size to hold CR if not in save_size */ + int vrsave_size; /* size to hold VRSAVE if not in save_size */ + int altivec_padding_size; /* 
size of altivec alignment padding if + not in save_size */ + int spe_gp_size; /* size of 64-bit GPR save size for SPE */ + int spe_padding_size; + HOST_WIDE_INT total_size; /* total bytes allocated for stack */ + int spe_64bit_regs_used; + int savres_strategy; +} rs6000_stack_t; + +/* A C structure for machine-specific, per-function data. + This is added to the cfun structure. */ +typedef struct GTY(()) machine_function +{ + /* Some local-dynamic symbol. */ + const char *some_ld_name; + /* Whether the instruction chain has been scanned already. */ + int insn_chain_scanned_p; + /* Flags if __builtin_return_address (n) with n >= 1 was used. */ + int ra_needs_full_frame; + /* Flags if __builtin_return_address (0) was used. */ + int ra_need_lr; + /* Cache lr_save_p after expansion of builtin_eh_return. */ + int lr_save_state; + /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4 + varargs save area. */ + HOST_WIDE_INT varargs_save_offset; + /* Temporary stack slot to use for SDmode copies. This slot is + 64-bits wide and is allocated early enough so that the offset + does not overflow the 16-bit load/store offset field. */ + rtx sdmode_stack_slot; +} machine_function; + +/* Target cpu type */ + +struct rs6000_cpu_select rs6000_select[3] = +{ + /* switch name, tune arch */ + { (const char *)0, "--with-cpu=", 1, 1 }, + { (const char *)0, "-mcpu=", 1, 1 }, + { (const char *)0, "-mtune=", 1, 0 }, +}; + +/* String variables to hold the various options. */ +static const char *rs6000_sched_insert_nops_str; +static const char *rs6000_sched_costly_dep_str; +static const char *rs6000_recip_name; + +#ifdef USING_ELFOS_H +static const char *rs6000_abi_name; +static const char *rs6000_sdata_name; +#endif + +/* Support targetm.vectorize.builtin_mask_for_load. */ +static GTY(()) tree altivec_builtin_mask_for_load; + +/* Set to nonzero once AIX common-mode calls have been defined. 
*/ +static GTY(()) int common_mode_defined; + +/* Label number of label created for -mrelocatable, to call to so we can + get the address of the GOT section */ +static int rs6000_pic_labelno; + +#ifdef USING_ELFOS_H +/* Counter for labels which are to be placed in .fixup. */ +int fixuplabelno = 0; +#endif + +/* Whether to use variant of AIX ABI for PowerPC64 Linux. */ +int dot_symbols; + +/* Specify the machine mode that pointers have. After generation of rtl, the + compiler makes no further distinction between pointers and any other objects + of this machine mode. The type is unsigned since not all things that + include rs6000.h also include machmode.h. */ +unsigned rs6000_pmode; + +/* Width in bits of a pointer. */ +unsigned rs6000_pointer_size; + +#ifdef HAVE_AS_GNU_ATTRIBUTE +/* Flag whether floating point values have been passed/returned. */ +static bool rs6000_passes_float; +/* Flag whether vector values have been passed/returned. */ +static bool rs6000_passes_vector; +/* Flag whether small (<= 8 byte) structures have been returned. */ +static bool rs6000_returns_struct; +#endif + +/* Value is TRUE if register/mode pair is acceptable. */ +bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER]; + +/* Maximum number of registers needed for a given register class and mode. */ +unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES]; + +/* How many registers are needed for a given register and mode. */ +unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER]; + +/* Map register number to register class. */ +enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER]; + +/* Reload functions based on the type and the vector unit. */ +static enum insn_code rs6000_vector_reload[NUM_MACHINE_MODES][2]; + +/* Built in types. 
*/ +tree rs6000_builtin_types[RS6000_BTI_MAX]; +tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT]; + +/* Flag to say the TOC is initialized */ +int toc_initialized; +char toc_label_name[10]; + +/* Cached value of rs6000_variable_issue. This is cached in + rs6000_variable_issue hook and returned from rs6000_sched_reorder2. */ +static short cached_can_issue_more; + +static GTY(()) section *read_only_data_section; +static GTY(()) section *private_data_section; +static GTY(()) section *read_only_private_data_section; +static GTY(()) section *sdata2_section; +static GTY(()) section *toc_section; + +/* True for any options that were explicitly set. */ +static struct { + bool aix_struct_ret; /* True if -maix-struct-ret was used. */ + bool alignment; /* True if -malign- was used. */ + bool spe_abi; /* True if -mabi=spe/no-spe was used. */ + bool altivec_abi; /* True if -mabi=altivec/no-altivec used. */ + bool spe; /* True if -mspe= was used. */ + bool float_gprs; /* True if -mfloat-gprs= was used. */ + bool long_double; /* True if -mlong-double- was used. */ + bool ieee; /* True if -mabi=ieee/ibmlongdouble used. */ + bool vrsave; /* True if -mvrsave was used. */ + bool cmodel; /* True if -mcmodel was used. */ +} rs6000_explicit_options; + +struct builtin_description +{ + /* mask is not const because we're going to alter it below. This + nonsense will go away when we rewrite the -march infrastructure + to give us more target flag bits. */ + unsigned int mask; + const enum insn_code icode; + const char *const name; + const enum rs6000_builtins code; +}; + +/* Describe the vector unit used for modes. */ +enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES]; +enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES]; + +/* Register classes for various constraints that are based on the target + switches. */ +enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX]; + +/* Describe the alignment of a vector. 
*/ +int rs6000_vector_align[NUM_MACHINE_MODES]; + +/* Map selected modes to types for builtins. */ +static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2]; + +/* What modes to automatically generate reciprocal divide estimate (fre) and + reciprocal sqrt (frsqrte) for. */ +unsigned char rs6000_recip_bits[MAX_MACHINE_MODE]; + +/* Masks to determine which reciprocal esitmate instructions to generate + automatically. */ +enum rs6000_recip_mask { + RECIP_SF_DIV = 0x001, /* Use divide estimate */ + RECIP_DF_DIV = 0x002, + RECIP_V4SF_DIV = 0x004, + RECIP_V2DF_DIV = 0x008, + + RECIP_SF_RSQRT = 0x010, /* Use reciprocal sqrt estimate. */ + RECIP_DF_RSQRT = 0x020, + RECIP_V4SF_RSQRT = 0x040, + RECIP_V2DF_RSQRT = 0x080, + + /* Various combination of flags for -mrecip=xxx. */ + RECIP_NONE = 0, + RECIP_ALL = (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV + | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT + | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT), + + RECIP_HIGH_PRECISION = RECIP_ALL, + + /* On low precision machines like the power5, don't enable double precision + reciprocal square root estimate, since it isn't accurate enough. */ + RECIP_LOW_PRECISION = (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT)) +}; + +/* -mrecip options. */ +static struct +{ + const char *string; /* option name */ + unsigned int mask; /* mask bits to set */ +} recip_options[] = { + { "all", RECIP_ALL }, + { "none", RECIP_NONE }, + { "div", (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV + | RECIP_V2DF_DIV) }, + { "divf", (RECIP_SF_DIV | RECIP_V4SF_DIV) }, + { "divd", (RECIP_DF_DIV | RECIP_V2DF_DIV) }, + { "rsqrt", (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT + | RECIP_V2DF_RSQRT) }, + { "rsqrtf", (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) }, + { "rsqrtd", (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) }, +}; + +/* 2 argument gen function typedef. */ +typedef rtx (*gen_2arg_fn_t) (rtx, rtx, rtx); + + +/* Target cpu costs. */ + +struct processor_costs { + const int mulsi; /* cost of SImode multiplication. 
*/ + const int mulsi_const; /* cost of SImode multiplication by constant. */ + const int mulsi_const9; /* cost of SImode mult by short constant. */ + const int muldi; /* cost of DImode multiplication. */ + const int divsi; /* cost of SImode division. */ + const int divdi; /* cost of DImode division. */ + const int fp; /* cost of simple SFmode and DFmode insns. */ + const int dmul; /* cost of DFmode multiplication (and fmadd). */ + const int sdiv; /* cost of SFmode division (fdivs). */ + const int ddiv; /* cost of DFmode division (fdiv). */ + const int cache_line_size; /* cache line size in bytes. */ + const int l1_cache_size; /* size of l1 cache, in kilobytes. */ + const int l2_cache_size; /* size of l2 cache, in kilobytes. */ + const int simultaneous_prefetches; /* number of parallel prefetch + operations. */ +}; + +const struct processor_costs *rs6000_cost; + +/* Processor costs (relative to an add) */ + +/* Instruction size costs on 32bit processors. */ +static const +struct processor_costs size32_cost = { + COSTS_N_INSNS (1), /* mulsi */ + COSTS_N_INSNS (1), /* mulsi_const */ + COSTS_N_INSNS (1), /* mulsi_const9 */ + COSTS_N_INSNS (1), /* muldi */ + COSTS_N_INSNS (1), /* divsi */ + COSTS_N_INSNS (1), /* divdi */ + COSTS_N_INSNS (1), /* fp */ + COSTS_N_INSNS (1), /* dmul */ + COSTS_N_INSNS (1), /* sdiv */ + COSTS_N_INSNS (1), /* ddiv */ + 32, + 0, + 0, + 0, +}; + +/* Instruction size costs on 64bit processors. */ +static const +struct processor_costs size64_cost = { + COSTS_N_INSNS (1), /* mulsi */ + COSTS_N_INSNS (1), /* mulsi_const */ + COSTS_N_INSNS (1), /* mulsi_const9 */ + COSTS_N_INSNS (1), /* muldi */ + COSTS_N_INSNS (1), /* divsi */ + COSTS_N_INSNS (1), /* divdi */ + COSTS_N_INSNS (1), /* fp */ + COSTS_N_INSNS (1), /* dmul */ + COSTS_N_INSNS (1), /* sdiv */ + COSTS_N_INSNS (1), /* ddiv */ + 128, + 0, + 0, + 0, +}; + +/* Instruction costs on RIOS1 processors. 
*/ +static const +struct processor_costs rios1_cost = { + COSTS_N_INSNS (5), /* mulsi */ + COSTS_N_INSNS (4), /* mulsi_const */ + COSTS_N_INSNS (3), /* mulsi_const9 */ + COSTS_N_INSNS (5), /* muldi */ + COSTS_N_INSNS (19), /* divsi */ + COSTS_N_INSNS (19), /* divdi */ + COSTS_N_INSNS (2), /* fp */ + COSTS_N_INSNS (2), /* dmul */ + COSTS_N_INSNS (19), /* sdiv */ + COSTS_N_INSNS (19), /* ddiv */ + 128, /* cache line size */ + 64, /* l1 cache */ + 512, /* l2 cache */ + 0, /* streams */ +}; + +/* Instruction costs on RIOS2 processors. */ +static const +struct processor_costs rios2_cost = { + COSTS_N_INSNS (2), /* mulsi */ + COSTS_N_INSNS (2), /* mulsi_const */ + COSTS_N_INSNS (2), /* mulsi_const9 */ + COSTS_N_INSNS (2), /* muldi */ + COSTS_N_INSNS (13), /* divsi */ + COSTS_N_INSNS (13), /* divdi */ + COSTS_N_INSNS (2), /* fp */ + COSTS_N_INSNS (2), /* dmul */ + COSTS_N_INSNS (17), /* sdiv */ + COSTS_N_INSNS (17), /* ddiv */ + 256, /* cache line size */ + 256, /* l1 cache */ + 1024, /* l2 cache */ + 0, /* streams */ +}; + +/* Instruction costs on RS64A processors. */ +static const +struct processor_costs rs64a_cost = { + COSTS_N_INSNS (20), /* mulsi */ + COSTS_N_INSNS (12), /* mulsi_const */ + COSTS_N_INSNS (8), /* mulsi_const9 */ + COSTS_N_INSNS (34), /* muldi */ + COSTS_N_INSNS (65), /* divsi */ + COSTS_N_INSNS (67), /* divdi */ + COSTS_N_INSNS (4), /* fp */ + COSTS_N_INSNS (4), /* dmul */ + COSTS_N_INSNS (31), /* sdiv */ + COSTS_N_INSNS (31), /* ddiv */ + 128, /* cache line size */ + 128, /* l1 cache */ + 2048, /* l2 cache */ + 1, /* streams */ +}; + +/* Instruction costs on MPCCORE processors. 
*/ +static const +struct processor_costs mpccore_cost = { + COSTS_N_INSNS (2), /* mulsi */ + COSTS_N_INSNS (2), /* mulsi_const */ + COSTS_N_INSNS (2), /* mulsi_const9 */ + COSTS_N_INSNS (2), /* muldi */ + COSTS_N_INSNS (6), /* divsi */ + COSTS_N_INSNS (6), /* divdi */ + COSTS_N_INSNS (4), /* fp */ + COSTS_N_INSNS (5), /* dmul */ + COSTS_N_INSNS (10), /* sdiv */ + COSTS_N_INSNS (17), /* ddiv */ + 32, /* cache line size */ + 4, /* l1 cache */ + 16, /* l2 cache */ + 1, /* streams */ +}; + +/* Instruction costs on PPC403 processors. */ +static const +struct processor_costs ppc403_cost = { + COSTS_N_INSNS (4), /* mulsi */ + COSTS_N_INSNS (4), /* mulsi_const */ + COSTS_N_INSNS (4), /* mulsi_const9 */ + COSTS_N_INSNS (4), /* muldi */ + COSTS_N_INSNS (33), /* divsi */ + COSTS_N_INSNS (33), /* divdi */ + COSTS_N_INSNS (11), /* fp */ + COSTS_N_INSNS (11), /* dmul */ + COSTS_N_INSNS (11), /* sdiv */ + COSTS_N_INSNS (11), /* ddiv */ + 32, /* cache line size */ + 4, /* l1 cache */ + 16, /* l2 cache */ + 1, /* streams */ +}; + +/* Instruction costs on PPC405 processors. */ +static const +struct processor_costs ppc405_cost = { + COSTS_N_INSNS (5), /* mulsi */ + COSTS_N_INSNS (4), /* mulsi_const */ + COSTS_N_INSNS (3), /* mulsi_const9 */ + COSTS_N_INSNS (5), /* muldi */ + COSTS_N_INSNS (35), /* divsi */ + COSTS_N_INSNS (35), /* divdi */ + COSTS_N_INSNS (11), /* fp */ + COSTS_N_INSNS (11), /* dmul */ + COSTS_N_INSNS (11), /* sdiv */ + COSTS_N_INSNS (11), /* ddiv */ + 32, /* cache line size */ + 16, /* l1 cache */ + 128, /* l2 cache */ + 1, /* streams */ +}; + +/* Instruction costs on PPC440 processors. 
*/ +static const +struct processor_costs ppc440_cost = { + COSTS_N_INSNS (3), /* mulsi */ + COSTS_N_INSNS (2), /* mulsi_const */ + COSTS_N_INSNS (2), /* mulsi_const9 */ + COSTS_N_INSNS (3), /* muldi */ + COSTS_N_INSNS (34), /* divsi */ + COSTS_N_INSNS (34), /* divdi */ + COSTS_N_INSNS (5), /* fp */ + COSTS_N_INSNS (5), /* dmul */ + COSTS_N_INSNS (19), /* sdiv */ + COSTS_N_INSNS (33), /* ddiv */ + 32, /* cache line size */ + 32, /* l1 cache */ + 256, /* l2 cache */ + 1, /* streams */ +}; + +/* Instruction costs on PPC476 processors. */ +static const +struct processor_costs ppc476_cost = { + COSTS_N_INSNS (4), /* mulsi */ + COSTS_N_INSNS (4), /* mulsi_const */ + COSTS_N_INSNS (4), /* mulsi_const9 */ + COSTS_N_INSNS (4), /* muldi */ + COSTS_N_INSNS (11), /* divsi */ + COSTS_N_INSNS (11), /* divdi */ + COSTS_N_INSNS (6), /* fp */ + COSTS_N_INSNS (6), /* dmul */ + COSTS_N_INSNS (19), /* sdiv */ + COSTS_N_INSNS (33), /* ddiv */ + 32, /* l1 cache line size */ + 32, /* l1 cache */ + 512, /* l2 cache */ + 1, /* streams */ +}; + +/* Instruction costs on PPC601 processors. */ +static const +struct processor_costs ppc601_cost = { + COSTS_N_INSNS (5), /* mulsi */ + COSTS_N_INSNS (5), /* mulsi_const */ + COSTS_N_INSNS (5), /* mulsi_const9 */ + COSTS_N_INSNS (5), /* muldi */ + COSTS_N_INSNS (36), /* divsi */ + COSTS_N_INSNS (36), /* divdi */ + COSTS_N_INSNS (4), /* fp */ + COSTS_N_INSNS (5), /* dmul */ + COSTS_N_INSNS (17), /* sdiv */ + COSTS_N_INSNS (31), /* ddiv */ + 32, /* cache line size */ + 32, /* l1 cache */ + 256, /* l2 cache */ + 1, /* streams */ +}; + +/* Instruction costs on PPC603 processors. 
*/ +static const +struct processor_costs ppc603_cost = { + COSTS_N_INSNS (5), /* mulsi */ + COSTS_N_INSNS (3), /* mulsi_const */ + COSTS_N_INSNS (2), /* mulsi_const9 */ + COSTS_N_INSNS (5), /* muldi */ + COSTS_N_INSNS (37), /* divsi */ + COSTS_N_INSNS (37), /* divdi */ + COSTS_N_INSNS (3), /* fp */ + COSTS_N_INSNS (4), /* dmul */ + COSTS_N_INSNS (18), /* sdiv */ + COSTS_N_INSNS (33), /* ddiv */ + 32, /* cache line size */ + 8, /* l1 cache */ + 64, /* l2 cache */ + 1, /* streams */ +}; + +/* Instruction costs on PPC604 processors. */ +static const +struct processor_costs ppc604_cost = { + COSTS_N_INSNS (4), /* mulsi */ + COSTS_N_INSNS (4), /* mulsi_const */ + COSTS_N_INSNS (4), /* mulsi_const9 */ + COSTS_N_INSNS (4), /* muldi */ + COSTS_N_INSNS (20), /* divsi */ + COSTS_N_INSNS (20), /* divdi */ + COSTS_N_INSNS (3), /* fp */ + COSTS_N_INSNS (3), /* dmul */ + COSTS_N_INSNS (18), /* sdiv */ + COSTS_N_INSNS (32), /* ddiv */ + 32, /* cache line size */ + 16, /* l1 cache */ + 512, /* l2 cache */ + 1, /* streams */ +}; + +/* Instruction costs on PPC604e processors. */ +static const +struct processor_costs ppc604e_cost = { + COSTS_N_INSNS (2), /* mulsi */ + COSTS_N_INSNS (2), /* mulsi_const */ + COSTS_N_INSNS (2), /* mulsi_const9 */ + COSTS_N_INSNS (2), /* muldi */ + COSTS_N_INSNS (20), /* divsi */ + COSTS_N_INSNS (20), /* divdi */ + COSTS_N_INSNS (3), /* fp */ + COSTS_N_INSNS (3), /* dmul */ + COSTS_N_INSNS (18), /* sdiv */ + COSTS_N_INSNS (32), /* ddiv */ + 32, /* cache line size */ + 32, /* l1 cache */ + 1024, /* l2 cache */ + 1, /* streams */ +}; + +/* Instruction costs on PPC620 processors. 
*/ +static const +struct processor_costs ppc620_cost = { + COSTS_N_INSNS (5), /* mulsi */ + COSTS_N_INSNS (4), /* mulsi_const */ + COSTS_N_INSNS (3), /* mulsi_const9 */ + COSTS_N_INSNS (7), /* muldi */ + COSTS_N_INSNS (21), /* divsi */ + COSTS_N_INSNS (37), /* divdi */ + COSTS_N_INSNS (3), /* fp */ + COSTS_N_INSNS (3), /* dmul */ + COSTS_N_INSNS (18), /* sdiv */ + COSTS_N_INSNS (32), /* ddiv */ + 128, /* cache line size */ + 32, /* l1 cache */ + 1024, /* l2 cache */ + 1, /* streams */ +}; + +/* Instruction costs on PPC630 processors. */ +static const +struct processor_costs ppc630_cost = { + COSTS_N_INSNS (5), /* mulsi */ + COSTS_N_INSNS (4), /* mulsi_const */ + COSTS_N_INSNS (3), /* mulsi_const9 */ + COSTS_N_INSNS (7), /* muldi */ + COSTS_N_INSNS (21), /* divsi */ + COSTS_N_INSNS (37), /* divdi */ + COSTS_N_INSNS (3), /* fp */ + COSTS_N_INSNS (3), /* dmul */ + COSTS_N_INSNS (17), /* sdiv */ + COSTS_N_INSNS (21), /* ddiv */ + 128, /* cache line size */ + 64, /* l1 cache */ + 1024, /* l2 cache */ + 1, /* streams */ +}; + +/* Instruction costs on Cell processor. */ +/* COSTS_N_INSNS (1) ~ one add. */ +static const +struct processor_costs ppccell_cost = { + COSTS_N_INSNS (9/2)+2, /* mulsi */ + COSTS_N_INSNS (6/2), /* mulsi_const */ + COSTS_N_INSNS (6/2), /* mulsi_const9 */ + COSTS_N_INSNS (15/2)+2, /* muldi */ + COSTS_N_INSNS (38/2), /* divsi */ + COSTS_N_INSNS (70/2), /* divdi */ + COSTS_N_INSNS (10/2), /* fp */ + COSTS_N_INSNS (10/2), /* dmul */ + COSTS_N_INSNS (74/2), /* sdiv */ + COSTS_N_INSNS (74/2), /* ddiv */ + 128, /* cache line size */ + 32, /* l1 cache */ + 512, /* l2 cache */ + 6, /* streams */ +}; + +/* Instruction costs on PPC750 and PPC7400 processors. 
*/ +static const +struct processor_costs ppc750_cost = { + COSTS_N_INSNS (5), /* mulsi */ + COSTS_N_INSNS (3), /* mulsi_const */ + COSTS_N_INSNS (2), /* mulsi_const9 */ + COSTS_N_INSNS (5), /* muldi */ + COSTS_N_INSNS (17), /* divsi */ + COSTS_N_INSNS (17), /* divdi */ + COSTS_N_INSNS (3), /* fp */ + COSTS_N_INSNS (3), /* dmul */ + COSTS_N_INSNS (17), /* sdiv */ + COSTS_N_INSNS (31), /* ddiv */ + 32, /* cache line size */ + 32, /* l1 cache */ + 512, /* l2 cache */ + 1, /* streams */ +}; + +/* Instruction costs on PPC7450 processors. */ +static const +struct processor_costs ppc7450_cost = { + COSTS_N_INSNS (4), /* mulsi */ + COSTS_N_INSNS (3), /* mulsi_const */ + COSTS_N_INSNS (3), /* mulsi_const9 */ + COSTS_N_INSNS (4), /* muldi */ + COSTS_N_INSNS (23), /* divsi */ + COSTS_N_INSNS (23), /* divdi */ + COSTS_N_INSNS (5), /* fp */ + COSTS_N_INSNS (5), /* dmul */ + COSTS_N_INSNS (21), /* sdiv */ + COSTS_N_INSNS (35), /* ddiv */ + 32, /* cache line size */ + 32, /* l1 cache */ + 1024, /* l2 cache */ + 1, /* streams */ +}; + +/* Instruction costs on PPC8540 processors. */ +static const +struct processor_costs ppc8540_cost = { + COSTS_N_INSNS (4), /* mulsi */ + COSTS_N_INSNS (4), /* mulsi_const */ + COSTS_N_INSNS (4), /* mulsi_const9 */ + COSTS_N_INSNS (4), /* muldi */ + COSTS_N_INSNS (19), /* divsi */ + COSTS_N_INSNS (19), /* divdi */ + COSTS_N_INSNS (4), /* fp */ + COSTS_N_INSNS (4), /* dmul */ + COSTS_N_INSNS (29), /* sdiv */ + COSTS_N_INSNS (29), /* ddiv */ + 32, /* cache line size */ + 32, /* l1 cache */ + 256, /* l2 cache */ + 1, /* prefetch streams /*/ +}; + +/* Instruction costs on E300C2 and E300C3 cores. 
*/ +static const +struct processor_costs ppce300c2c3_cost = { + COSTS_N_INSNS (4), /* mulsi */ + COSTS_N_INSNS (4), /* mulsi_const */ + COSTS_N_INSNS (4), /* mulsi_const9 */ + COSTS_N_INSNS (4), /* muldi */ + COSTS_N_INSNS (19), /* divsi */ + COSTS_N_INSNS (19), /* divdi */ + COSTS_N_INSNS (3), /* fp */ + COSTS_N_INSNS (4), /* dmul */ + COSTS_N_INSNS (18), /* sdiv */ + COSTS_N_INSNS (33), /* ddiv */ + 32, + 16, /* l1 cache */ + 16, /* l2 cache */ + 1, /* prefetch streams /*/ +}; + +/* Instruction costs on PPCE500MC processors. */ +static const +struct processor_costs ppce500mc_cost = { + COSTS_N_INSNS (4), /* mulsi */ + COSTS_N_INSNS (4), /* mulsi_const */ + COSTS_N_INSNS (4), /* mulsi_const9 */ + COSTS_N_INSNS (4), /* muldi */ + COSTS_N_INSNS (14), /* divsi */ + COSTS_N_INSNS (14), /* divdi */ + COSTS_N_INSNS (8), /* fp */ + COSTS_N_INSNS (10), /* dmul */ + COSTS_N_INSNS (36), /* sdiv */ + COSTS_N_INSNS (66), /* ddiv */ + 64, /* cache line size */ + 32, /* l1 cache */ + 128, /* l2 cache */ + 1, /* prefetch streams /*/ +}; + +/* Instruction costs on PPCE500MC64 processors. */ +static const +struct processor_costs ppce500mc64_cost = { + COSTS_N_INSNS (4), /* mulsi */ + COSTS_N_INSNS (4), /* mulsi_const */ + COSTS_N_INSNS (4), /* mulsi_const9 */ + COSTS_N_INSNS (4), /* muldi */ + COSTS_N_INSNS (14), /* divsi */ + COSTS_N_INSNS (14), /* divdi */ + COSTS_N_INSNS (4), /* fp */ + COSTS_N_INSNS (10), /* dmul */ + COSTS_N_INSNS (36), /* sdiv */ + COSTS_N_INSNS (66), /* ddiv */ + 64, /* cache line size */ + 32, /* l1 cache */ + 128, /* l2 cache */ + 1, /* prefetch streams /*/ +}; + +/* Instruction costs on AppliedMicro Titan processors. 
*/ +static const +struct processor_costs titan_cost = { + COSTS_N_INSNS (5), /* mulsi */ + COSTS_N_INSNS (5), /* mulsi_const */ + COSTS_N_INSNS (5), /* mulsi_const9 */ + COSTS_N_INSNS (5), /* muldi */ + COSTS_N_INSNS (18), /* divsi */ + COSTS_N_INSNS (18), /* divdi */ + COSTS_N_INSNS (10), /* fp */ + COSTS_N_INSNS (10), /* dmul */ + COSTS_N_INSNS (46), /* sdiv */ + COSTS_N_INSNS (72), /* ddiv */ + 32, /* cache line size */ + 32, /* l1 cache */ + 512, /* l2 cache */ + 1, /* prefetch streams /*/ +}; + +/* Instruction costs on POWER4 and POWER5 processors. */ +static const +struct processor_costs power4_cost = { + COSTS_N_INSNS (3), /* mulsi */ + COSTS_N_INSNS (2), /* mulsi_const */ + COSTS_N_INSNS (2), /* mulsi_const9 */ + COSTS_N_INSNS (4), /* muldi */ + COSTS_N_INSNS (18), /* divsi */ + COSTS_N_INSNS (34), /* divdi */ + COSTS_N_INSNS (3), /* fp */ + COSTS_N_INSNS (3), /* dmul */ + COSTS_N_INSNS (17), /* sdiv */ + COSTS_N_INSNS (17), /* ddiv */ + 128, /* cache line size */ + 32, /* l1 cache */ + 1024, /* l2 cache */ + 8, /* prefetch streams /*/ +}; + +/* Instruction costs on POWER6 processors. */ +static const +struct processor_costs power6_cost = { + COSTS_N_INSNS (8), /* mulsi */ + COSTS_N_INSNS (8), /* mulsi_const */ + COSTS_N_INSNS (8), /* mulsi_const9 */ + COSTS_N_INSNS (8), /* muldi */ + COSTS_N_INSNS (22), /* divsi */ + COSTS_N_INSNS (28), /* divdi */ + COSTS_N_INSNS (3), /* fp */ + COSTS_N_INSNS (3), /* dmul */ + COSTS_N_INSNS (13), /* sdiv */ + COSTS_N_INSNS (16), /* ddiv */ + 128, /* cache line size */ + 64, /* l1 cache */ + 2048, /* l2 cache */ + 16, /* prefetch streams */ +}; + +/* Instruction costs on POWER7 processors. 
*/ +static const +struct processor_costs power7_cost = { + COSTS_N_INSNS (2), /* mulsi */ + COSTS_N_INSNS (2), /* mulsi_const */ + COSTS_N_INSNS (2), /* mulsi_const9 */ + COSTS_N_INSNS (2), /* muldi */ + COSTS_N_INSNS (18), /* divsi */ + COSTS_N_INSNS (34), /* divdi */ + COSTS_N_INSNS (3), /* fp */ + COSTS_N_INSNS (3), /* dmul */ + COSTS_N_INSNS (13), /* sdiv */ + COSTS_N_INSNS (16), /* ddiv */ + 128, /* cache line size */ + 32, /* l1 cache */ + 256, /* l2 cache */ + 12, /* prefetch streams */ +}; + +/* Instruction costs on POWER A2 processors. */ +static const +struct processor_costs ppca2_cost = { + COSTS_N_INSNS (16), /* mulsi */ + COSTS_N_INSNS (16), /* mulsi_const */ + COSTS_N_INSNS (16), /* mulsi_const9 */ + COSTS_N_INSNS (16), /* muldi */ + COSTS_N_INSNS (22), /* divsi */ + COSTS_N_INSNS (28), /* divdi */ + COSTS_N_INSNS (3), /* fp */ + COSTS_N_INSNS (3), /* dmul */ + COSTS_N_INSNS (59), /* sdiv */ + COSTS_N_INSNS (72), /* ddiv */ + 64, + 16, /* l1 cache */ + 2048, /* l2 cache */ + 16, /* prefetch streams */ +}; + + +/* Table that classifies rs6000 builtin functions (pure, const, etc.). */ +#undef RS6000_BUILTIN +#undef RS6000_BUILTIN_EQUATE +#define RS6000_BUILTIN(NAME, TYPE) TYPE, +#define RS6000_BUILTIN_EQUATE(NAME, VALUE) + +static const enum rs6000_btc builtin_classify[(int)RS6000_BUILTIN_COUNT] = +{ +#include "rs6000-builtin.def" +}; + +#undef RS6000_BUILTIN +#undef RS6000_BUILTIN_EQUATE + +/* Support for -mveclibabi= to control which vector library to use. 
*/ +static tree (*rs6000_veclib_handler) (tree, tree, tree); + + +static bool rs6000_function_ok_for_sibcall (tree, tree); +static const char *rs6000_invalid_within_doloop (const_rtx); +static bool rs6000_legitimate_address_p (enum machine_mode, rtx, bool); +static bool rs6000_debug_legitimate_address_p (enum machine_mode, rtx, bool); +static rtx rs6000_generate_compare (rtx, enum machine_mode); +static void rs6000_emit_stack_tie (void); +static void rs6000_frame_related (rtx, rtx, HOST_WIDE_INT, rtx, rtx); +static bool spe_func_has_64bit_regs_p (void); +static void emit_frame_save (rtx, rtx, enum machine_mode, unsigned int, + int, HOST_WIDE_INT); +static rtx gen_frame_mem_offset (enum machine_mode, rtx, int); +static unsigned rs6000_hash_constant (rtx); +static unsigned toc_hash_function (const void *); +static int toc_hash_eq (const void *, const void *); +static bool reg_offset_addressing_ok_p (enum machine_mode); +static bool virtual_stack_registers_memory_p (rtx); +static bool constant_pool_expr_p (rtx); +static bool legitimate_small_data_p (enum machine_mode, rtx); +static bool legitimate_lo_sum_address_p (enum machine_mode, rtx, int); +static struct machine_function * rs6000_init_machine_status (void); +static bool rs6000_assemble_integer (rtx, unsigned int, int); +static bool no_global_regs_above (int, bool); +#ifdef HAVE_GAS_HIDDEN +static void rs6000_assemble_visibility (tree, int); +#endif +static int rs6000_ra_ever_killed (void); +static bool rs6000_attribute_takes_identifier_p (const_tree); +static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *); +static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *); +static bool rs6000_ms_bitfield_layout_p (const_tree); +static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *); +static void rs6000_eliminate_indexed_memrefs (rtx operands[2]); +static const char *rs6000_mangle_type (const_tree); +static void rs6000_set_default_type_attributes (tree); 
+static rtx rs6000_savres_routine_sym (rs6000_stack_t *, bool, bool, bool); +static rtx rs6000_emit_stack_reset (rs6000_stack_t *, rtx, rtx, int, bool); +static rtx rs6000_make_savres_rtx (rs6000_stack_t *, rtx, int, + enum machine_mode, bool, bool, bool); +static bool rs6000_reg_live_or_pic_offset_p (int); +static tree rs6000_builtin_vectorized_libmass (tree, tree, tree); +static tree rs6000_builtin_vectorized_function (tree, tree, tree); +static void rs6000_restore_saved_cr (rtx, int); +static bool rs6000_output_addr_const_extra (FILE *, rtx); +static void rs6000_output_function_prologue (FILE *, HOST_WIDE_INT); +static void rs6000_output_function_epilogue (FILE *, HOST_WIDE_INT); +static void rs6000_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT, + tree); +static rtx rs6000_emit_set_long_const (rtx, HOST_WIDE_INT, HOST_WIDE_INT); +static bool rs6000_return_in_memory (const_tree, const_tree); +static rtx rs6000_function_value (const_tree, const_tree, bool); +static void rs6000_file_start (void); +#if TARGET_ELF +static int rs6000_elf_reloc_rw_mask (void); +static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED; +static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED; +static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED; +static void rs6000_elf_asm_init_sections (void); +static section *rs6000_elf_select_rtx_section (enum machine_mode, rtx, + unsigned HOST_WIDE_INT); +static void rs6000_elf_encode_section_info (tree, rtx, int) + ATTRIBUTE_UNUSED; +#endif +static bool rs6000_use_blocks_for_constant_p (enum machine_mode, const_rtx); +static void rs6000_alloc_sdmode_stack_slot (void); +static void rs6000_instantiate_decls (void); +#if TARGET_XCOFF +static void rs6000_xcoff_asm_output_anchor (rtx); +static void rs6000_xcoff_asm_globalize_label (FILE *, const char *); +static void rs6000_xcoff_asm_init_sections (void); +static int rs6000_xcoff_reloc_rw_mask (void); +static void rs6000_xcoff_asm_named_section (const char 
*, unsigned int, tree); +static section *rs6000_xcoff_select_section (tree, int, + unsigned HOST_WIDE_INT); +static void rs6000_xcoff_unique_section (tree, int); +static section *rs6000_xcoff_select_rtx_section + (enum machine_mode, rtx, unsigned HOST_WIDE_INT); +static const char * rs6000_xcoff_strip_name_encoding (const char *); +static unsigned int rs6000_xcoff_section_type_flags (tree, const char *, int); +static void rs6000_xcoff_file_start (void); +static void rs6000_xcoff_file_end (void); +#endif +static int rs6000_variable_issue (FILE *, int, rtx, int); +static int rs6000_register_move_cost (enum machine_mode, + reg_class_t, reg_class_t); +static int rs6000_memory_move_cost (enum machine_mode, reg_class_t, bool); +static bool rs6000_rtx_costs (rtx, int, int, int *, bool); +static bool rs6000_debug_rtx_costs (rtx, int, int, int *, bool); +static int rs6000_debug_address_cost (rtx, bool); +static int rs6000_adjust_cost (rtx, rtx, rtx, int); +static int rs6000_debug_adjust_cost (rtx, rtx, rtx, int); +static void rs6000_sched_init (FILE *, int, int); +static bool is_microcoded_insn (rtx); +static bool is_nonpipeline_insn (rtx); +static bool is_cracked_insn (rtx); +static bool is_branch_slot_insn (rtx); +static bool is_load_insn (rtx); +static rtx get_store_dest (rtx pat); +static bool is_store_insn (rtx); +static bool set_to_load_agen (rtx,rtx); +static bool adjacent_mem_locations (rtx,rtx); +static int rs6000_adjust_priority (rtx, int); +static int rs6000_issue_rate (void); +static bool rs6000_is_costly_dependence (dep_t, int, int); +static rtx get_next_active_insn (rtx, rtx); +static bool insn_terminates_group_p (rtx , enum group_termination); +static bool insn_must_be_first_in_group (rtx); +static bool insn_must_be_last_in_group (rtx); +static bool is_costly_group (rtx *, rtx); +static int force_new_group (int, FILE *, rtx *, rtx, bool *, int, int *); +static int redefine_groups (FILE *, int, rtx, rtx); +static int pad_groups (FILE *, int, rtx, rtx); +static 
void rs6000_sched_finish (FILE *, int); +static int rs6000_sched_reorder (FILE *, int, rtx *, int *, int); +static int rs6000_sched_reorder2 (FILE *, int, rtx *, int *, int); +static int rs6000_use_sched_lookahead (void); +static int rs6000_use_sched_lookahead_guard (rtx); +static void * rs6000_alloc_sched_context (void); +static void rs6000_init_sched_context (void *, bool); +static void rs6000_set_sched_context (void *); +static void rs6000_free_sched_context (void *); +static tree rs6000_builtin_reciprocal (unsigned int, bool, bool); +static tree rs6000_builtin_mask_for_load (void); +static tree rs6000_builtin_mul_widen_even (tree); +static tree rs6000_builtin_mul_widen_odd (tree); +static tree rs6000_builtin_conversion (unsigned int, tree, tree); +static tree rs6000_builtin_vec_perm (tree, tree *); +static bool rs6000_builtin_support_vector_misalignment (enum + machine_mode, + const_tree, + int, bool); +static int rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt, + tree, int); +static enum machine_mode rs6000_preferred_simd_mode (enum machine_mode); + +static void def_builtin (int, const char *, tree, int); +static bool rs6000_vector_alignment_reachable (const_tree, bool); +static void rs6000_init_builtins (void); +static tree rs6000_builtin_decl (unsigned, bool); + +static rtx rs6000_expand_unop_builtin (enum insn_code, tree, rtx); +static rtx rs6000_expand_binop_builtin (enum insn_code, tree, rtx); +static rtx rs6000_expand_ternop_builtin (enum insn_code, tree, rtx); +static rtx rs6000_expand_builtin (tree, rtx, rtx, enum machine_mode, int); +static void altivec_init_builtins (void); +static unsigned builtin_hash_function (const void *); +static int builtin_hash_eq (const void *, const void *); +static tree builtin_function_type (enum machine_mode, enum machine_mode, + enum machine_mode, enum machine_mode, + enum rs6000_builtins, const char *name); +static void rs6000_common_init_builtins (void); +static void rs6000_init_libfuncs (void); + +static 
void paired_init_builtins (void); +static rtx paired_expand_builtin (tree, rtx, bool *); +static rtx paired_expand_lv_builtin (enum insn_code, tree, rtx); +static rtx paired_expand_stv_builtin (enum insn_code, tree); +static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx); + +static void enable_mask_for_builtins (struct builtin_description *, int, + enum rs6000_builtins, + enum rs6000_builtins); +static void spe_init_builtins (void); +static rtx spe_expand_builtin (tree, rtx, bool *); +static rtx spe_expand_stv_builtin (enum insn_code, tree); +static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx); +static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx); +static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx); +static rs6000_stack_t *rs6000_stack_info (void); +static void debug_stack_info (rs6000_stack_t *); + +static rtx altivec_expand_builtin (tree, rtx, bool *); +static rtx altivec_expand_ld_builtin (tree, rtx, bool *); +static rtx altivec_expand_st_builtin (tree, rtx, bool *); +static rtx altivec_expand_dst_builtin (tree, rtx, bool *); +static rtx altivec_expand_abs_builtin (enum insn_code, tree, rtx); +static rtx altivec_expand_predicate_builtin (enum insn_code, tree, rtx); +static rtx altivec_expand_stv_builtin (enum insn_code, tree); +static rtx altivec_expand_vec_init_builtin (tree, tree, rtx); +static rtx altivec_expand_vec_set_builtin (tree); +static rtx altivec_expand_vec_ext_builtin (tree, rtx); +static int get_element_number (tree, tree); +static void rs6000_option_override (void); +static void rs6000_option_init_struct (struct gcc_options *); +static void rs6000_option_default_params (void); +static bool rs6000_handle_option (size_t, const char *, int); +static int rs6000_loop_align_max_skip (rtx); +static void rs6000_parse_yes_no_option (const char *, const char *, int *); +static int first_altivec_reg_to_save (void); +static unsigned int compute_vrsave_mask (void); +static void compute_save_world_info 
(rs6000_stack_t *info_ptr); +static void is_altivec_return_reg (rtx, void *); +static rtx generate_set_vrsave (rtx, rs6000_stack_t *, int); +int easy_vector_constant (rtx, enum machine_mode); +static rtx rs6000_dwarf_register_span (rtx); +static void rs6000_init_dwarf_reg_sizes_extra (tree); +static rtx rs6000_legitimize_address (rtx, rtx, enum machine_mode); +static rtx rs6000_debug_legitimize_address (rtx, rtx, enum machine_mode); +static rtx rs6000_legitimize_tls_address (rtx, enum tls_model); +static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED; +static rtx rs6000_delegitimize_address (rtx); +static rtx rs6000_tls_get_addr (void); +static rtx rs6000_got_sym (void); +static int rs6000_tls_symbol_ref_1 (rtx *, void *); +static const char *rs6000_get_some_local_dynamic_name (void); +static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *); +static rtx rs6000_complex_function_value (enum machine_mode); +static rtx rs6000_spe_function_arg (const CUMULATIVE_ARGS *, + enum machine_mode, const_tree); +static void rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *, + HOST_WIDE_INT, int); +static void rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *, + const_tree, + HOST_WIDE_INT); +static void rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *, + HOST_WIDE_INT, + rtx[], int *); +static void rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *, + const_tree, HOST_WIDE_INT, + rtx[], int *); +static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree, bool, bool); +static rtx rs6000_mixed_function_arg (enum machine_mode, const_tree, int); +static void rs6000_function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode, + const_tree, bool); +static rtx rs6000_function_arg (CUMULATIVE_ARGS *, enum machine_mode, + const_tree, bool); +static unsigned int rs6000_function_arg_boundary (enum machine_mode, + const_tree); +static void rs6000_move_block_from_reg (int regno, rtx x, int nregs); +static void 
setup_incoming_varargs (CUMULATIVE_ARGS *, + enum machine_mode, tree, + int *, int); +static bool rs6000_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode, + const_tree, bool); +static int rs6000_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode, + tree, bool); +static const char *invalid_arg_for_unprototyped_fn (const_tree, const_tree, const_tree); +#if TARGET_MACHO +static void macho_branch_islands (void); +static int no_previous_def (tree function_name); +static tree get_prev_label (tree function_name); +static void rs6000_darwin_file_start (void); +#endif + +static tree rs6000_build_builtin_va_list (void); +static void rs6000_va_start (tree, rtx); +static tree rs6000_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *); +static bool rs6000_must_pass_in_stack (enum machine_mode, const_tree); +static bool rs6000_scalar_mode_supported_p (enum machine_mode); +static bool rs6000_vector_mode_supported_p (enum machine_mode); +static rtx rs6000_emit_vector_compare_inner (enum rtx_code, rtx, rtx); +static rtx rs6000_emit_vector_compare (enum rtx_code, rtx, rtx, + enum machine_mode); +static tree rs6000_stack_protect_fail (void); + +static rtx rs6000_legitimize_reload_address (rtx, enum machine_mode, int, int, + int, int *); + +static rtx rs6000_debug_legitimize_reload_address (rtx, enum machine_mode, int, + int, int, int *); + +rtx (*rs6000_legitimize_reload_address_ptr) (rtx, enum machine_mode, int, int, + int, int *) + = rs6000_legitimize_reload_address; + +static bool rs6000_mode_dependent_address_p (const_rtx); +static bool rs6000_mode_dependent_address (const_rtx); +static bool rs6000_debug_mode_dependent_address (const_rtx); +static bool (*rs6000_mode_dependent_address_ptr) (const_rtx) + = rs6000_mode_dependent_address; + +static enum reg_class rs6000_secondary_reload_class (enum reg_class, + enum machine_mode, rtx); +static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class, + enum machine_mode, + rtx); +enum reg_class 
(*rs6000_secondary_reload_class_ptr) (enum reg_class, + enum machine_mode, rtx) + = rs6000_secondary_reload_class; + +static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class); +static enum reg_class rs6000_debug_preferred_reload_class (rtx, + enum reg_class); +enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class) + = rs6000_preferred_reload_class; + +static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class, + enum machine_mode); + +static bool rs6000_debug_secondary_memory_needed (enum reg_class, + enum reg_class, + enum machine_mode); + +bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class, + enum machine_mode) + = rs6000_secondary_memory_needed; + +static bool rs6000_cannot_change_mode_class (enum machine_mode, + enum machine_mode, + enum reg_class); +static bool rs6000_debug_cannot_change_mode_class (enum machine_mode, + enum machine_mode, + enum reg_class); + +bool (*rs6000_cannot_change_mode_class_ptr) (enum machine_mode, + enum machine_mode, + enum reg_class) + = rs6000_cannot_change_mode_class; + +static reg_class_t rs6000_secondary_reload (bool, rtx, reg_class_t, + enum machine_mode, + struct secondary_reload_info *); + +static const reg_class_t *rs6000_ira_cover_classes (void); + +const int INSN_NOT_AVAILABLE = -1; +static enum machine_mode rs6000_eh_return_filter_mode (void); +static bool rs6000_can_eliminate (const int, const int); +static void rs6000_conditional_register_usage (void); +static void rs6000_trampoline_init (rtx, tree, rtx); +static bool rs6000_cannot_force_const_mem (rtx); + +/* Hash table stuff for keeping track of TOC entries. */ + +struct GTY(()) toc_hash_struct +{ + /* `key' will satisfy CONSTANT_P; in fact, it will satisfy + ASM_OUTPUT_SPECIAL_POOL_ENTRY_P. 
*/ + rtx key; + enum machine_mode key_mode; + int labelno; +}; + +static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table; + +/* Hash table to keep track of the argument types for builtin functions. */ + +struct GTY(()) builtin_hash_struct +{ + tree type; + enum machine_mode mode[4]; /* return value + 3 arguments. */ + unsigned char uns_p[4]; /* and whether the types are unsigned. */ +}; + +static GTY ((param_is (struct builtin_hash_struct))) htab_t builtin_hash_table; + +static bool rs6000_valid_attribute_p (tree, tree, tree, int); +static void rs6000_function_specific_save (struct cl_target_option *); +static void rs6000_function_specific_restore (struct cl_target_option *); +static void rs6000_function_specific_print (FILE *, int, + struct cl_target_option *); +static bool rs6000_can_inline_p (tree, tree); +static void rs6000_set_current_function (tree); + + +/* Default register names. */ +char rs6000_reg_names[][8] = +{ + "0", "1", "2", "3", "4", "5", "6", "7", + "8", "9", "10", "11", "12", "13", "14", "15", + "16", "17", "18", "19", "20", "21", "22", "23", + "24", "25", "26", "27", "28", "29", "30", "31", + "0", "1", "2", "3", "4", "5", "6", "7", + "8", "9", "10", "11", "12", "13", "14", "15", + "16", "17", "18", "19", "20", "21", "22", "23", + "24", "25", "26", "27", "28", "29", "30", "31", + "mq", "lr", "ctr","ap", + "0", "1", "2", "3", "4", "5", "6", "7", + "ca", + /* AltiVec registers. */ + "0", "1", "2", "3", "4", "5", "6", "7", + "8", "9", "10", "11", "12", "13", "14", "15", + "16", "17", "18", "19", "20", "21", "22", "23", + "24", "25", "26", "27", "28", "29", "30", "31", + "vrsave", "vscr", + /* SPE registers. */ + "spe_acc", "spefscr", + /* Soft frame pointer. 
*/ + "sfp" +}; + +#ifdef TARGET_REGNAMES +static const char alt_reg_names[][8] = +{ + "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", + "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", + "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23", + "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31", + "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7", + "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15", + "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23", + "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31", + "mq", "lr", "ctr", "ap", + "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7", + "ca", + /* AltiVec registers. */ + "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7", + "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15", + "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23", + "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31", + "vrsave", "vscr", + /* SPE registers. */ + "spe_acc", "spefscr", + /* Soft frame pointer. */ + "sfp" +}; +#endif + +/* Table of valid machine attributes. */ + +static const struct attribute_spec rs6000_attribute_table[] = +{ + /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */ + { "altivec", 1, 1, false, true, false, rs6000_handle_altivec_attribute }, + { "longcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute }, + { "shortcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute }, + { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute }, + { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute }, +#ifdef SUBTARGET_ATTRIBUTE_TABLE + SUBTARGET_ATTRIBUTE_TABLE, +#endif + { NULL, 0, 0, false, false, false, NULL } +}; + +/* Implement TARGET_OPTION_OPTIMIZATION_TABLE. 
*/
static const struct default_options rs6000_option_optimization_table[] =
  {
    /* Frame pointer omission is on by default at -O1 and above.  */
    { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
    { OPT_LEVELS_NONE, 0, NULL, 0 }
  };

/* Fallbacks for subtargets that do not define these.  */
#ifndef MASK_STRICT_ALIGN
#define MASK_STRICT_ALIGN 0
#endif
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))

/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#ifdef HAVE_GAS_HIDDEN
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

/* Instruction scheduler hooks.  */
#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD \
  rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

/* Vectorizer hooks.  */
#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_EVEN
#define TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_EVEN rs6000_builtin_mul_widen_even
#undef TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_ODD
#define TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_ODD rs6000_builtin_mul_widen_odd
#undef TARGET_VECTORIZE_BUILTIN_CONVERSION
#define TARGET_VECTORIZE_BUILTIN_CONVERSION rs6000_builtin_conversion
#undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
#define TARGET_VECTORIZE_BUILTIN_VEC_PERM rs6000_builtin_vec_perm
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP rs6000_invalid_within_doloop

/* Cost model hooks.  */
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra

/* On rs6000, function arguments are promoted, as are function return
   values.  */
#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG rs6000_function_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION rs6000_handle_option

#undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
#define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE rs6000_option_override

#undef TARGET_OPTION_INIT_STRUCT
#define TARGET_OPTION_INIT_STRUCT rs6000_option_init_struct

#undef TARGET_OPTION_DEFAULT_PARAMS
#define TARGET_OPTION_DEFAULT_PARAMS rs6000_option_default_params

#undef TARGET_OPTION_OPTIMIZATION_TABLE
#define TARGET_OPTION_OPTIMIZATION_TABLE rs6000_option_optimization_table

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  rs6000_builtin_vectorized_function

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS \
  (TARGET_DEFAULT)

#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail

/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
   The PowerPC architecture requires only weak consistency among
   processors--that is, memory accesses between processors need not be
   sequentially consistent and memory accesses among processors can occur
   in any order.  The ability to order memory accesses weakly provides
   opportunities for more efficient use of the system bus.  Unless a
   dependency exists, the 604e allows read operations to precede store
   operations.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
#endif

/* Use a 32-bit anchor range.  This leads to sequences like:

	addis	tmp,anchor,high
	add	dest,tmp,low

   where tmp itself acts as an anchor, and can be shared between
   accesses to the same 64k page.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD rs6000_secondary_reload

#undef TARGET_IRA_COVER_CLASSES
#define TARGET_IRA_COVER_CLASSES rs6000_ira_cover_classes

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p

#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE rs6000_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE rs6000_function_value

/* Per-function target option (attribute/pragma) hooks.  */
#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE rs6000_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE rs6000_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT rs6000_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P rs6000_can_inline_p

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function

/* The one definition of the target hook vector, built from all the
   TARGET_* overrides above.  */
struct gcc_target targetm = TARGET_INITIALIZER;


/* Simplifications for
entries below.  */

enum {
  POWERPC_BASE_MASK = MASK_POWERPC | MASK_NEW_MNEMONICS,
  POWERPC_7400_MASK = POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_ALTIVEC
};

/* Some OSs don't support saving the high part of 64-bit registers on context
   switch.  Other OSs don't support saving Altivec registers.  On those OSs, we
   don't touch the MASK_POWERPC64 or MASK_ALTIVEC settings; if the user wants
   either, the user must explicitly specify them and we won't interfere with
   the user's specification.  */

enum {
  /* Flags specific to the old POWER architecture.  */
  POWER_MASKS = MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING,
  /* Union of all PowerPC-family feature flags.  */
  POWERPC_MASKS = (POWERPC_BASE_MASK | MASK_PPC_GPOPT | MASK_STRICT_ALIGN
		   | MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_ALTIVEC
		   | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_MULHW
		   | MASK_DLMZB | MASK_CMPB | MASK_MFPGPR | MASK_DFP
		   | MASK_POPCNTD | MASK_VSX | MASK_ISEL | MASK_NO_UPDATE
		   | MASK_RECIP_PRECISION)
};

/* Masks for instructions set at various powerpc ISAs.  */
enum {
  ISA_2_1_MASKS = MASK_MFCRF,
  ISA_2_2_MASKS = (ISA_2_1_MASKS | MASK_POPCNTB),
  ISA_2_4_MASKS = (ISA_2_2_MASKS | MASK_FPRND),

  /* For ISA 2.05, do not add MFPGPR, since it isn't in ISA 2.06, and don't add
     ALTIVEC, since in general it isn't a win on power6.  In ISA 2.04, fsel,
     fre, fsqrt, etc. were no longer documented as optional.  Group masks by
     server and embedded.  */
  ISA_2_5_MASKS_EMBEDDED = (ISA_2_2_MASKS | MASK_CMPB | MASK_RECIP_PRECISION
			    | MASK_PPC_GFXOPT | MASK_PPC_GPOPT),
  ISA_2_5_MASKS_SERVER = (ISA_2_5_MASKS_EMBEDDED | MASK_DFP),

  /* For ISA 2.06, don't add ISEL, since in general it isn't a win, but
     altivec is a win so enable it.  */
  ISA_2_6_MASKS_EMBEDDED = (ISA_2_5_MASKS_EMBEDDED | MASK_POPCNTD),
  ISA_2_6_MASKS_SERVER = (ISA_2_5_MASKS_SERVER | MASK_POPCNTD | MASK_ALTIVEC
			  | MASK_VSX)
};

/* This table occasionally claims that a processor does not support a
   particular feature even though it does, but the feature is slower than the
   alternative.
Thus, it shouldn't be relied on as a complete description of
   the processor's support.

   Please keep this list in order, and don't forget to update the
   documentation in invoke.texi when adding a new processor or flag.  */

struct rs6000_ptt
{
  const char *const name;		/* Canonical processor name.  */
  const enum processor_type processor;	/* Processor type enum value.  */
  const int target_enable;		/* Target flags to enable.  */
};

/* Mapping of -mcpu=/-mtune= names to scheduling model and default
   target flags; searched linearly by rs6000_cpu_name_lookup.  */
static struct rs6000_ptt const processor_target_table[] =
{
  {"401", PROCESSOR_PPC403, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
  {"403", PROCESSOR_PPC403,
   POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_STRICT_ALIGN},
  {"405", PROCESSOR_PPC405,
   POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB},
  {"405fp", PROCESSOR_PPC405,
   POWERPC_BASE_MASK | MASK_MULHW | MASK_DLMZB},
  {"440", PROCESSOR_PPC440,
   POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB},
  {"440fp", PROCESSOR_PPC440,
   POWERPC_BASE_MASK | MASK_MULHW | MASK_DLMZB},
  {"464", PROCESSOR_PPC440,
   POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB},
  {"464fp", PROCESSOR_PPC440,
   POWERPC_BASE_MASK | MASK_MULHW | MASK_DLMZB},
  {"476", PROCESSOR_PPC476,
   POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_PPC_GFXOPT | MASK_MFCRF
   | MASK_POPCNTB | MASK_FPRND | MASK_CMPB | MASK_MULHW | MASK_DLMZB},
  {"476fp", PROCESSOR_PPC476,
   POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_MFCRF | MASK_POPCNTB
   | MASK_FPRND | MASK_CMPB | MASK_MULHW | MASK_DLMZB},
  {"505", PROCESSOR_MPCCORE, POWERPC_BASE_MASK},
  {"601", PROCESSOR_PPC601,
   MASK_POWER | POWERPC_BASE_MASK | MASK_MULTIPLE | MASK_STRING},
  {"602", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
  {"603", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
  {"603e", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
  {"604", PROCESSOR_PPC604, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
  {"604e", PROCESSOR_PPC604e, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
  {"620", PROCESSOR_PPC620,
   POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
  {"630", PROCESSOR_PPC630,
   POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
  {"740", PROCESSOR_PPC750, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
  {"7400", PROCESSOR_PPC7400, POWERPC_7400_MASK},
  {"7450", PROCESSOR_PPC7450, POWERPC_7400_MASK},
  {"750", PROCESSOR_PPC750, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
  {"801", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
  {"821", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
  {"823", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
  {"8540", PROCESSOR_PPC8540, POWERPC_BASE_MASK | MASK_STRICT_ALIGN
   | MASK_ISEL},
  /* 8548 has a dummy entry for now.  */
  {"8548", PROCESSOR_PPC8540, POWERPC_BASE_MASK | MASK_STRICT_ALIGN
   | MASK_ISEL},
  {"a2", PROCESSOR_PPCA2,
   POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_POPCNTB
   | MASK_CMPB | MASK_NO_UPDATE },
  {"e300c2", PROCESSOR_PPCE300C2, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
  {"e300c3", PROCESSOR_PPCE300C3, POWERPC_BASE_MASK},
  {"e500mc", PROCESSOR_PPCE500MC, POWERPC_BASE_MASK | MASK_PPC_GFXOPT
   | MASK_ISEL},
  {"e500mc64", PROCESSOR_PPCE500MC64, POWERPC_BASE_MASK | MASK_POWERPC64
   | MASK_PPC_GFXOPT | MASK_ISEL},
  {"860", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
  {"970", PROCESSOR_POWER4,
   POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64},
  {"cell", PROCESSOR_CELL,
   POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64},
  {"common", PROCESSOR_COMMON, MASK_NEW_MNEMONICS},
  {"ec603e", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
  {"G3", PROCESSOR_PPC750, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
  {"G4",  PROCESSOR_PPC7450, POWERPC_7400_MASK},
  {"G5", PROCESSOR_POWER4,
   POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64},
  {"titan", PROCESSOR_TITAN,
   POWERPC_BASE_MASK | MASK_MULHW | MASK_DLMZB},
  {"power", PROCESSOR_POWER, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
  {"power2", PROCESSOR_POWER,
   MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING},
  {"power3", PROCESSOR_PPC630,
   POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
  {"power4", PROCESSOR_POWER4,
   POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
   | MASK_MFCRF},
  {"power5", PROCESSOR_POWER5,
   POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
   | MASK_MFCRF | MASK_POPCNTB},
  {"power5+", PROCESSOR_POWER5,
   POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
   | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND},
  {"power6", PROCESSOR_POWER6,
   POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
   | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_CMPB | MASK_DFP
   | MASK_RECIP_PRECISION},
  {"power6x", PROCESSOR_POWER6,
   POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
   | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_CMPB | MASK_DFP
   | MASK_MFPGPR | MASK_RECIP_PRECISION},
  {"power7", PROCESSOR_POWER7,	/* Don't add MASK_ISEL by default */
   POWERPC_7400_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_MFCRF
   | MASK_POPCNTB | MASK_FPRND | MASK_CMPB | MASK_DFP | MASK_POPCNTD
   | MASK_VSX | MASK_RECIP_PRECISION},
  {"powerpc", PROCESSOR_POWERPC, POWERPC_BASE_MASK},
  {"powerpc64", PROCESSOR_POWERPC64,
   POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
  {"rios", PROCESSOR_RIOS1, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
  {"rios1", PROCESSOR_RIOS1, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
  {"rios2", PROCESSOR_RIOS2,
   MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING},
  {"rsc", PROCESSOR_PPC601, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
  {"rsc1", PROCESSOR_PPC601, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
  {"rs64", PROCESSOR_RS64A,
   POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64}
};

/* Look up a processor name for -mcpu=xxx and -mtune=xxx.  Return -1 if the
   name is invalid.
*/ + +static int +rs6000_cpu_name_lookup (const char *name) +{ + size_t i; + + if (name != NULL) + { + for (i = 0; i < ARRAY_SIZE (processor_target_table); i++) + if (! strcmp (name, processor_target_table[i].name)) + return (int)i; + } + + return -1; +} + + +/* Return number of consecutive hard regs needed starting at reg REGNO + to hold something of mode MODE. + This is ordinarily the length in words of a value of mode MODE + but can be less for certain modes in special long registers. + + For the SPE, GPRs are 64 bits but only 32 bits are visible in + scalar instructions. The upper 32 bits are only available to the + SIMD instructions. + + POWER and PowerPC GPRs hold 32 bits worth; + PowerPC64 GPRs and FPRs point register holds 64 bits worth. */ + +static int +rs6000_hard_regno_nregs_internal (int regno, enum machine_mode mode) +{ + unsigned HOST_WIDE_INT reg_size; + + if (FP_REGNO_P (regno)) + reg_size = (VECTOR_MEM_VSX_P (mode) + ? UNITS_PER_VSX_WORD + : UNITS_PER_FP_WORD); + + else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode)) + reg_size = UNITS_PER_SPE_WORD; + + else if (ALTIVEC_REGNO_P (regno)) + reg_size = UNITS_PER_ALTIVEC_WORD; + + /* The value returned for SCmode in the E500 double case is 2 for + ABI compatibility; storing an SCmode value in a single register + would require function_arg and rs6000_spe_function_arg to handle + SCmode so as to pass the value correctly in a pair of + registers. */ + else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode + && !DECIMAL_FLOAT_MODE_P (mode)) + reg_size = UNITS_PER_FP_WORD; + + else + reg_size = UNITS_PER_WORD; + + return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size; +} + +/* Value is 1 if hard register REGNO can hold a value of machine-mode + MODE. 
*/
static int
rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
{
  /* Last hard register the value would occupy, given how many
     registers MODE needs starting at REGNO.  */
  int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;

  /* VSX registers that overlap the FPR registers are larger than for non-VSX
     implementations.  Don't allow an item to be split between a FP register
     and an Altivec register.  */
  if (VECTOR_MEM_VSX_P (mode))
    {
      if (FP_REGNO_P (regno))
	return FP_REGNO_P (last_regno);

      if (ALTIVEC_REGNO_P (regno))
	return ALTIVEC_REGNO_P (last_regno);
    }

  /* The GPRs can hold any mode, but values bigger than one register
     cannot go past R31.  */
  if (INT_REGNO_P (regno))
    return INT_REGNO_P (last_regno);

  /* The float registers (except for VSX vector modes) can only hold floating
     modes and DImode.  This excludes the 32-bit decimal float mode for
     now.  */
  if (FP_REGNO_P (regno))
    {
      /* TDmode (128-bit decimal float) must start on an even register.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && (mode != TDmode || (regno % 2) == 0)
	  && FP_REGNO_P (last_regno))
	return 1;

      if (GET_MODE_CLASS (mode) == MODE_INT
	  && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
	return 1;

      if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
	  && PAIRED_VECTOR_MODE (mode))
	return 1;

      return 0;
    }

  /* The CR register can only hold CC modes.  */
  if (CR_REGNO_P (regno))
    return GET_MODE_CLASS (mode) == MODE_CC;

  /* The carry register holds only the 1-bit carry flag.  */
  if (CA_REGNO_P (regno))
    return mode == BImode;

  /* AltiVec only in AltiVec registers.  */
  if (ALTIVEC_REGNO_P (regno))
    return VECTOR_MEM_ALTIVEC_OR_VSX_P (mode);

  /* ...but GPRs can hold SIMD data on the SPE in one register.  */
  if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
    return 1;

  /* We cannot put TImode anywhere except general register and it must be able
     to fit within the register set.  In the future, allow TImode in the
     Altivec or VSX registers.  */

  return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
}

/* Print interesting facts about registers.
*/
static void
rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
{
  int r, m;

  for (r = first_regno; r <= last_regno; ++r)
    {
      const char *comma = "";
      int len;			/* Running output-column estimate for wrapping.  */

      if (first_regno == last_regno)
	fprintf (stderr, "%s:\t", reg_name);
      else
	fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);

      len = 8;
      /* List every mode this register can hold, wrapping lines near
	 column 70.  */
      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
	  {
	    if (len > 70)
	      {
		fprintf (stderr, ",\n\t");
		len = 8;
		comma = "";
	      }

	    if (rs6000_hard_regno_nregs[m][r] > 1)
	      len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
			      rs6000_hard_regno_nregs[m][r]);
	    else
	      len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));

	    comma = ", ";
	  }

      if (call_used_regs[r])
	{
	  if (len > 70)
	    {
	      fprintf (stderr, ",\n\t");
	      len = 8;
	      comma = "";
	    }

	  len += fprintf (stderr, "%s%s", comma, "call-used");
	  comma = ", ";
	}

      if (fixed_regs[r])
	{
	  if (len > 70)
	    {
	      fprintf (stderr, ",\n\t");
	      len = 8;
	      comma = "";
	    }

	  len += fprintf (stderr, "%s%s", comma, "fixed");
	  comma = ", ";
	}

      if (len > 70)
	{
	  fprintf (stderr, ",\n\t");
	  comma = "";
	}

      fprintf (stderr, "%sregno = %d\n", comma, r);
    }
}

#define DEBUG_FMT_D "%-32s= %d\n"
#define DEBUG_FMT_S "%-32s= %s\n"

/* Print various interesting information with -mdebug=reg.  */
static void
rs6000_debug_reg_global (void)
{
  static const char *const tf[2] = { "false", "true" };
  const char *nl = (const char *)0;
  int m;
  char costly_num[20];
  char nop_num[20];
  const char *costly_str;
  const char *nop_str;
  const char *trace_str;
  const char *abi_str;
  const char *cmodel_str;

  /* Map enum rs6000_vector to string.
  */
  static const char *rs6000_debug_vector_unit[] = {
    "none",
    "altivec",
    "vsx",
    "paired",
    "spe",
    "other"
  };

  fprintf (stderr, "Register information: (last virtual reg = %d)\n",
	   LAST_VIRTUAL_REGISTER);
  rs6000_debug_reg_print (0, 31, "gr");
  rs6000_debug_reg_print (32, 63, "fp");
  rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
			  LAST_ALTIVEC_REGNO,
			  "vs");
  rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
  rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
  rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
  rs6000_debug_reg_print (MQ_REGNO, MQ_REGNO, "mq");
  rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
  rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
  rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
  rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
  rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");

  /* Show which register class backs each machine-specific constraint.  */
  fprintf (stderr,
	   "\n"
	   "d reg_class = %s\n"
	   "f reg_class = %s\n"
	   "v reg_class = %s\n"
	   "wa reg_class = %s\n"
	   "wd reg_class = %s\n"
	   "wf reg_class = %s\n"
	   "ws reg_class = %s\n\n",
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]]);

  for (m = 0; m < NUM_MACHINE_MODES; ++m)
    if (rs6000_vector_unit[m] || rs6000_vector_mem[m])
      {
	nl = "\n";
	fprintf (stderr, "Vector mode: %-5s arithmetic: %-8s move: %-8s\n",
		 GET_MODE_NAME (m),
		 rs6000_debug_vector_unit[ rs6000_vector_unit[m] ],
		 rs6000_debug_vector_unit[ rs6000_vector_mem[m] ]);
      }

  if (nl)
    fputs (nl, stderr);

  if (rs6000_recip_control)
    {
      fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);

      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	if (rs6000_recip_bits[m])
	  {
	    fprintf (stderr,
		     "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
		     GET_MODE_NAME (m),
		     (RS6000_RECIP_AUTO_RE_P (m)
		      ? "auto"
		      : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
		     (RS6000_RECIP_AUTO_RSQRTE_P (m)
		      ? "auto"
		      : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
	  }

      fputs ("\n", stderr);
    }

  if (rs6000_cpu_index >= 0)
    fprintf (stderr, DEBUG_FMT_S, "cpu",
	     processor_target_table[rs6000_cpu_index].name);

  if (rs6000_tune_index >= 0)
    fprintf (stderr, DEBUG_FMT_S, "tune",
	     processor_target_table[rs6000_tune_index].name);

  /* Enum values outside the named cases print as raw numbers.  */
  switch (rs6000_sched_costly_dep)
    {
    case max_dep_latency:
      costly_str = "max_dep_latency";
      break;

    case no_dep_costly:
      costly_str = "no_dep_costly";
      break;

    case all_deps_costly:
      costly_str = "all_deps_costly";
      break;

    case true_store_to_load_dep_costly:
      costly_str = "true_store_to_load_dep_costly";
      break;

    case store_to_load_dep_costly:
      costly_str = "store_to_load_dep_costly";
      break;

    default:
      costly_str = costly_num;
      sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
      break;
    }

  fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);

  switch (rs6000_sched_insert_nops)
    {
    case sched_finish_regroup_exact:
      nop_str = "sched_finish_regroup_exact";
      break;

    case sched_finish_pad_groups:
      nop_str = "sched_finish_pad_groups";
      break;

    case sched_finish_none:
      nop_str = "sched_finish_none";
      break;

    default:
      nop_str = nop_num;
      sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
      break;
    }

  fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);

  switch (rs6000_sdata)
    {
    default:
    case SDATA_NONE:
      break;

    case SDATA_DATA:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
      break;

    case SDATA_SYSV:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
      break;

    case SDATA_EABI:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
      break;

    }

  switch (rs6000_traceback)
    {
    case traceback_default:	trace_str = "default";	break;
    case traceback_none:	trace_str = "none";	break;
    case traceback_part:	trace_str = "part";	break;
    case traceback_full:	trace_str = "full";	break;
    default:			trace_str = "unknown";	break;
    }

  fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);

  switch (rs6000_current_cmodel)
    {
    case CMODEL_SMALL:	cmodel_str = "small";	break;
    case CMODEL_MEDIUM:	cmodel_str = "medium";	break;
    case CMODEL_LARGE:	cmodel_str = "large";	break;
    default:		cmodel_str = "unknown";	break;
    }

  fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);

  switch (rs6000_current_abi)
    {
    case ABI_NONE:	abi_str = "none";	break;
    case ABI_AIX:	abi_str = "aix";	break;
    case ABI_V4:	abi_str = "V4";		break;
    case ABI_DARWIN:	abi_str = "darwin";	break;
    default:		abi_str = "unknown";	break;
    }

  fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);

  if (rs6000_altivec_abi)
    fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");

  if (rs6000_spe_abi)
    fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");

  if (rs6000_darwin64_abi)
    fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");

  if (rs6000_float_gprs)
    fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");

  fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
  fprintf (stderr, DEBUG_FMT_S, "align_branch",
	   tf[!!rs6000_align_branch_targets]);
  fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
  fprintf (stderr, DEBUG_FMT_D, "long_double_size",
	   rs6000_long_double_type_size);
  fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
	   (int)rs6000_sched_restricted_insns_priority);
}

/* Initialize the various global tables that are based on register size.  */
static void
rs6000_init_hard_regno_mode_ok (bool global_init_p)
{
  int r, m, c;
  int align64;
  int align32;

  /* Precalculate REGNO_REG_CLASS.
*/ + rs6000_regno_regclass[0] = GENERAL_REGS; + for (r = 1; r < 32; ++r) + rs6000_regno_regclass[r] = BASE_REGS; + + for (r = 32; r < 64; ++r) + rs6000_regno_regclass[r] = FLOAT_REGS; + + for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r) + rs6000_regno_regclass[r] = NO_REGS; + + for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r) + rs6000_regno_regclass[r] = ALTIVEC_REGS; + + rs6000_regno_regclass[CR0_REGNO] = CR0_REGS; + for (r = CR1_REGNO; r <= CR7_REGNO; ++r) + rs6000_regno_regclass[r] = CR_REGS; + + rs6000_regno_regclass[MQ_REGNO] = MQ_REGS; + rs6000_regno_regclass[LR_REGNO] = LINK_REGS; + rs6000_regno_regclass[CTR_REGNO] = CTR_REGS; + rs6000_regno_regclass[CA_REGNO] = CA_REGS; + rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS; + rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS; + rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS; + rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS; + rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS; + rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS; + + /* Precalculate vector information, this must be set up before the + rs6000_hard_regno_nregs_internal below. */ + for (m = 0; m < NUM_MACHINE_MODES; ++m) + { + rs6000_vector_unit[m] = rs6000_vector_mem[m] = VECTOR_NONE; + rs6000_vector_reload[m][0] = CODE_FOR_nothing; + rs6000_vector_reload[m][1] = CODE_FOR_nothing; + } + + for (c = 0; c < (int)(int)RS6000_CONSTRAINT_MAX; c++) + rs6000_constraints[c] = NO_REGS; + + /* The VSX hardware allows native alignment for vectors, but control whether the compiler + believes it can use native alignment or still uses 128-bit alignment. */ + if (TARGET_VSX && !TARGET_VSX_ALIGN_128) + { + align64 = 64; + align32 = 32; + } + else + { + align64 = 128; + align32 = 128; + } + + /* V2DF mode, VSX only. */ + if (TARGET_VSX) + { + rs6000_vector_unit[V2DFmode] = VECTOR_VSX; + rs6000_vector_mem[V2DFmode] = VECTOR_VSX; + rs6000_vector_align[V2DFmode] = align64; + } + + /* V4SF mode, either VSX or Altivec. 
*/ + if (TARGET_VSX) + { + rs6000_vector_unit[V4SFmode] = VECTOR_VSX; + rs6000_vector_mem[V4SFmode] = VECTOR_VSX; + rs6000_vector_align[V4SFmode] = align32; + } + else if (TARGET_ALTIVEC) + { + rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC; + rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC; + rs6000_vector_align[V4SFmode] = align32; + } + + /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads + and stores. */ + if (TARGET_ALTIVEC) + { + rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC; + rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC; + rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC; + rs6000_vector_align[V4SImode] = align32; + rs6000_vector_align[V8HImode] = align32; + rs6000_vector_align[V16QImode] = align32; + + if (TARGET_VSX) + { + rs6000_vector_mem[V4SImode] = VECTOR_VSX; + rs6000_vector_mem[V8HImode] = VECTOR_VSX; + rs6000_vector_mem[V16QImode] = VECTOR_VSX; + } + else + { + rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC; + rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC; + rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC; + } + } + + /* V2DImode, only allow under VSX, which can do V2DI insert/splat/extract. + Altivec doesn't have 64-bit support. */ + if (TARGET_VSX) + { + rs6000_vector_mem[V2DImode] = VECTOR_VSX; + rs6000_vector_unit[V2DImode] = VECTOR_NONE; + rs6000_vector_align[V2DImode] = align64; + } + + /* DFmode, see if we want to use the VSX unit. */ + if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE) + { + rs6000_vector_unit[DFmode] = VECTOR_VSX; + rs6000_vector_mem[DFmode] + = (TARGET_VSX_SCALAR_MEMORY ? VECTOR_VSX : VECTOR_NONE); + rs6000_vector_align[DFmode] = align64; + } + + /* TODO add SPE and paired floating point vector support. */ + + /* Register class constaints for the constraints that depend on compile + switches. 
*/ + if (TARGET_HARD_FLOAT && TARGET_FPRS) + rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; + + if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) + rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; + + if (TARGET_VSX) + { + /* At present, we just use VSX_REGS, but we have different constraints + based on the use, in case we want to fine tune the default register + class used. wa = any VSX register, wf = register class to use for + V4SF, wd = register class to use for V2DF, and ws = register classs to + use for DF scalars. */ + rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS; + rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; + rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; + rs6000_constraints[RS6000_CONSTRAINT_ws] = (TARGET_VSX_SCALAR_MEMORY + ? VSX_REGS + : FLOAT_REGS); + } + + if (TARGET_ALTIVEC) + rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS; + + /* Set up the reload helper functions. */ + if (TARGET_VSX || TARGET_ALTIVEC) + { + if (TARGET_64BIT) + { + rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_di_store; + rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_di_load; + rs6000_vector_reload[V8HImode][0] = CODE_FOR_reload_v8hi_di_store; + rs6000_vector_reload[V8HImode][1] = CODE_FOR_reload_v8hi_di_load; + rs6000_vector_reload[V4SImode][0] = CODE_FOR_reload_v4si_di_store; + rs6000_vector_reload[V4SImode][1] = CODE_FOR_reload_v4si_di_load; + rs6000_vector_reload[V2DImode][0] = CODE_FOR_reload_v2di_di_store; + rs6000_vector_reload[V2DImode][1] = CODE_FOR_reload_v2di_di_load; + rs6000_vector_reload[V4SFmode][0] = CODE_FOR_reload_v4sf_di_store; + rs6000_vector_reload[V4SFmode][1] = CODE_FOR_reload_v4sf_di_load; + rs6000_vector_reload[V2DFmode][0] = CODE_FOR_reload_v2df_di_store; + rs6000_vector_reload[V2DFmode][1] = CODE_FOR_reload_v2df_di_load; + } + else + { + rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_si_store; + rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_si_load; + 
rs6000_vector_reload[V8HImode][0] = CODE_FOR_reload_v8hi_si_store; + rs6000_vector_reload[V8HImode][1] = CODE_FOR_reload_v8hi_si_load; + rs6000_vector_reload[V4SImode][0] = CODE_FOR_reload_v4si_si_store; + rs6000_vector_reload[V4SImode][1] = CODE_FOR_reload_v4si_si_load; + rs6000_vector_reload[V2DImode][0] = CODE_FOR_reload_v2di_si_store; + rs6000_vector_reload[V2DImode][1] = CODE_FOR_reload_v2di_si_load; + rs6000_vector_reload[V4SFmode][0] = CODE_FOR_reload_v4sf_si_store; + rs6000_vector_reload[V4SFmode][1] = CODE_FOR_reload_v4sf_si_load; + rs6000_vector_reload[V2DFmode][0] = CODE_FOR_reload_v2df_si_store; + rs6000_vector_reload[V2DFmode][1] = CODE_FOR_reload_v2df_si_load; + } + } + + /* Precalculate HARD_REGNO_NREGS. */ + for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r) + for (m = 0; m < NUM_MACHINE_MODES; ++m) + rs6000_hard_regno_nregs[m][r] + = rs6000_hard_regno_nregs_internal (r, (enum machine_mode)m); + + /* Precalculate HARD_REGNO_MODE_OK. */ + for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r) + for (m = 0; m < NUM_MACHINE_MODES; ++m) + if (rs6000_hard_regno_mode_ok (r, (enum machine_mode)m)) + rs6000_hard_regno_mode_ok_p[m][r] = true; + + /* Precalculate CLASS_MAX_NREGS sizes. */ + for (c = 0; c < LIM_REG_CLASSES; ++c) + { + int reg_size; + + if (TARGET_VSX && VSX_REG_CLASS_P (c)) + reg_size = UNITS_PER_VSX_WORD; + + else if (c == ALTIVEC_REGS) + reg_size = UNITS_PER_ALTIVEC_WORD; + + else if (c == FLOAT_REGS) + reg_size = UNITS_PER_FP_WORD; + + else + reg_size = UNITS_PER_WORD; + + for (m = 0; m < NUM_MACHINE_MODES; ++m) + rs6000_class_max_nregs[m][c] + = (GET_MODE_SIZE (m) + reg_size - 1) / reg_size; + } + + if (TARGET_E500_DOUBLE) + rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1; + + /* Calculate which modes to automatically generate code to use a the + reciprocal divide and square root instructions. In the future, possibly + automatically generate the instructions even if the user did not specify + -mrecip. 
The older machines double precision reciprocal sqrt estimate is + not accurate enough. */ + memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits)); + if (TARGET_FRES) + rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE; + if (TARGET_FRE) + rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE; + if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)) + rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE; + if (VECTOR_UNIT_VSX_P (V2DFmode)) + rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE; + + if (TARGET_FRSQRTES) + rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE; + if (TARGET_FRSQRTE) + rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE; + if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)) + rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE; + if (VECTOR_UNIT_VSX_P (V2DFmode)) + rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE; + + if (rs6000_recip_control) + { + if (!flag_finite_math_only) + warning (0, "-mrecip requires -ffinite-math or -ffast-math"); + if (flag_trapping_math) + warning (0, "-mrecip requires -fno-trapping-math or -ffast-math"); + if (!flag_reciprocal_math) + warning (0, "-mrecip requires -freciprocal-math or -ffast-math"); + if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math) + { + if (RS6000_RECIP_HAVE_RE_P (SFmode) + && (rs6000_recip_control & RECIP_SF_DIV) != 0) + rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE; + + if (RS6000_RECIP_HAVE_RE_P (DFmode) + && (rs6000_recip_control & RECIP_DF_DIV) != 0) + rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE; + + if (RS6000_RECIP_HAVE_RE_P (V4SFmode) + && (rs6000_recip_control & RECIP_V4SF_DIV) != 0) + rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE; + + if (RS6000_RECIP_HAVE_RE_P (V2DFmode) + && (rs6000_recip_control & RECIP_V2DF_DIV) != 0) + rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE; + + if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode) + && (rs6000_recip_control & RECIP_SF_RSQRT) != 0) + 
rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE; + + if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode) + && (rs6000_recip_control & RECIP_DF_RSQRT) != 0) + rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE; + + if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode) + && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0) + rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE; + + if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode) + && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0) + rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE; + } + } + + if (global_init_p || TARGET_DEBUG_TARGET) + { + if (TARGET_DEBUG_REG) + rs6000_debug_reg_global (); + + if (TARGET_DEBUG_COST || TARGET_DEBUG_REG) + fprintf (stderr, + "SImode variable mult cost = %d\n" + "SImode constant mult cost = %d\n" + "SImode short constant mult cost = %d\n" + "DImode multipliciation cost = %d\n" + "SImode division cost = %d\n" + "DImode division cost = %d\n" + "Simple fp operation cost = %d\n" + "DFmode multiplication cost = %d\n" + "SFmode division cost = %d\n" + "DFmode division cost = %d\n" + "cache line size = %d\n" + "l1 cache size = %d\n" + "l2 cache size = %d\n" + "simultaneous prefetches = %d\n" + "\n", + rs6000_cost->mulsi, + rs6000_cost->mulsi_const, + rs6000_cost->mulsi_const9, + rs6000_cost->muldi, + rs6000_cost->divsi, + rs6000_cost->divdi, + rs6000_cost->fp, + rs6000_cost->dmul, + rs6000_cost->sdiv, + rs6000_cost->ddiv, + rs6000_cost->cache_line_size, + rs6000_cost->l1_cache_size, + rs6000_cost->l2_cache_size, + rs6000_cost->simultaneous_prefetches); + } +} + +#if TARGET_MACHO +/* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */ + +static void +darwin_rs6000_override_options (void) +{ + /* The Darwin ABI always includes AltiVec, can't be (validly) turned + off. */ + rs6000_altivec_abi = 1; + TARGET_ALTIVEC_VRSAVE = 1; + rs6000_current_abi = ABI_DARWIN; + + if (DEFAULT_ABI == ABI_DARWIN + && TARGET_64BIT) + darwin_one_byte_bool = 1; + + if (TARGET_64BIT && ! 
TARGET_POWERPC64) + { + target_flags |= MASK_POWERPC64; + warning (0, "-m64 requires PowerPC64 architecture, enabling"); + } + if (flag_mkernel) + { + rs6000_default_long_calls = 1; + target_flags |= MASK_SOFT_FLOAT; + } + + /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes + Altivec. */ + if (!flag_mkernel && !flag_apple_kext + && TARGET_64BIT + && ! (target_flags_explicit & MASK_ALTIVEC)) + target_flags |= MASK_ALTIVEC; + + /* Unless the user (not the configurer) has explicitly overridden + it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to + G4 unless targetting the kernel. */ + if (!flag_mkernel + && !flag_apple_kext + && strverscmp (darwin_macosx_version_min, "10.5") >= 0 + && ! (target_flags_explicit & MASK_ALTIVEC) + && ! rs6000_select[1].string) + { + target_flags |= MASK_ALTIVEC; + } +} +#endif + +/* If not otherwise specified by a target, make 'long double' equivalent to + 'double'. */ + +#ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE +#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64 +#endif + +/* Override command line options. Mostly we process the processor type and + sometimes adjust other TARGET_ options. */ + +static bool +rs6000_option_override_internal (bool global_init_p) +{ + bool ret = true; + const char *default_cpu = OPTION_TARGET_CPU_DEFAULT; + int set_masks; + int cpu_index; + int tune_index; + struct cl_target_option *main_target_opt + = ((global_init_p || target_option_default_node == NULL) + ? NULL : TREE_TARGET_OPTION (target_option_default_node)); + + /* Numerous experiment shows that IRA based loop pressure + calculation works better for RTL loop invariant motion on targets + with enough (>= 32) registers. It is an expensive optimization. + So it is on only for peak performance. */ + if (optimize >= 3 && global_init_p) + flag_ira_loop_pressure = 1; + + /* Set the pointer size. 
*/ + if (TARGET_64BIT) + { + rs6000_pmode = (int)DImode; + rs6000_pointer_size = 64; + } + else + { + rs6000_pmode = (int)SImode; + rs6000_pointer_size = 32; + } + + set_masks = POWER_MASKS | POWERPC_MASKS | MASK_SOFT_FLOAT; +#ifdef OS_MISSING_POWERPC64 + if (OS_MISSING_POWERPC64) + set_masks &= ~MASK_POWERPC64; +#endif +#ifdef OS_MISSING_ALTIVEC + if (OS_MISSING_ALTIVEC) + set_masks &= ~(MASK_ALTIVEC | MASK_VSX); +#endif + + /* Don't override by the processor default if given explicitly. */ + set_masks &= ~target_flags_explicit; + + /* Identify the processor type. */ + if (!default_cpu) + { + if (TARGET_POWERPC64) + default_cpu = "powerpc64"; + else if (TARGET_POWERPC) + default_cpu = "powerpc"; + } + + /* Process the -mcpu= and -mtune= argument. If the user changed + the cpu in a target attribute or pragma, but did not specify a tuning + option, use the cpu for the tuning option rather than the option specified + with -mtune on the command line. */ + if (rs6000_cpu_index > 0) + cpu_index = rs6000_cpu_index; + else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index > 0) + rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index; + else + rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu); + + if (rs6000_tune_index > 0) + tune_index = rs6000_tune_index; + else + rs6000_tune_index = tune_index = cpu_index; + + if (cpu_index >= 0) + { + target_flags &= ~set_masks; + target_flags |= (processor_target_table[cpu_index].target_enable + & set_masks); + } + + rs6000_cpu = ((tune_index >= 0) + ? processor_target_table[tune_index].processor + : (TARGET_POWERPC64 + ? 
PROCESSOR_DEFAULT64 + : PROCESSOR_DEFAULT)); + + if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3 + || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64) + { + if (TARGET_ALTIVEC) + error ("AltiVec not supported in this target"); + if (TARGET_SPE) + error ("SPE not supported in this target"); + } + + /* Disable Cell microcode if we are optimizing for the Cell + and not optimizing for size. */ + if (rs6000_gen_cell_microcode == -1) + rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL + && !optimize_size); + + /* If we are optimizing big endian systems for space and it's OK to + use instructions that would be microcoded on the Cell, use the + load/store multiple and string instructions. */ + if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode) + target_flags |= ~target_flags_explicit & (MASK_MULTIPLE | MASK_STRING); + + /* Don't allow -mmultiple or -mstring on little endian systems + unless the cpu is a 750, because the hardware doesn't support the + instructions used in little endian mode, and causes an alignment + trap. The 750 does not cause an alignment trap (except when the + target is unaligned). */ + + if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750) + { + if (TARGET_MULTIPLE) + { + target_flags &= ~MASK_MULTIPLE; + if ((target_flags_explicit & MASK_MULTIPLE) != 0) + warning (0, "-mmultiple is not supported on little endian systems"); + } + + if (TARGET_STRING) + { + target_flags &= ~MASK_STRING; + if ((target_flags_explicit & MASK_STRING) != 0) + warning (0, "-mstring is not supported on little endian systems"); + } + } + + /* Add some warnings for VSX. 
*/ + if (TARGET_VSX) + { + const char *msg = NULL; + if (!TARGET_HARD_FLOAT || !TARGET_FPRS + || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT) + { + if (target_flags_explicit & MASK_VSX) + msg = N_("-mvsx requires hardware floating point"); + else + target_flags &= ~ MASK_VSX; + } + else if (TARGET_PAIRED_FLOAT) + msg = N_("-mvsx and -mpaired are incompatible"); + /* The hardware will allow VSX and little endian, but until we make sure + things like vector select, etc. work don't allow VSX on little endian + systems at this point. */ + else if (!BYTES_BIG_ENDIAN) + msg = N_("-mvsx used with little endian code"); + else if (TARGET_AVOID_XFORM > 0) + msg = N_("-mvsx needs indexed addressing"); + else if (!TARGET_ALTIVEC && (target_flags_explicit & MASK_ALTIVEC)) + { + if (target_flags_explicit & MASK_VSX) + msg = N_("-mvsx and -mno-altivec are incompatible"); + else + msg = N_("-mno-altivec disables vsx"); + } + + if (msg) + { + warning (0, msg); + target_flags &= ~ MASK_VSX; + target_flags_explicit |= MASK_VSX; + } + } + + /* For the newer switches (vsx, dfp, etc.) set some of the older options, + unless the user explicitly used the -mno-