summaryrefslogtreecommitdiff
path: root/gcc/testsuite/gcc.dg/vmx/3c-01a.c
diff options
context:
space:
mode:
author: upstream source tree <ports@midipix.org> 2015-03-15 20:14:05 -0400
committer: upstream source tree <ports@midipix.org> 2015-03-15 20:14:05 -0400
commit: 554fd8c5195424bdbcabf5de30fdc183aba391bd (patch)
tree: 976dc5ab7fddf506dadce60ae936f43f58787092 /gcc/testsuite/gcc.dg/vmx/3c-01a.c
download: cbb-gcc-4.6.4-554fd8c5195424bdbcabf5de30fdc183aba391bd.tar.bz2
cbb-gcc-4.6.4-554fd8c5195424bdbcabf5de30fdc183aba391bd.tar.xz
obtained gcc-4.6.4.tar.bz2 from upstream website; upstream
verified gcc-4.6.4.tar.bz2.sig; imported gcc-4.6.4 source tree from verified upstream tarball. downloading a git-generated archive based on the 'upstream' tag should provide you with a source tree that is binary identical to the one extracted from the above tarball. if you have obtained the source via the command 'git clone', however, do note that line-endings of files in your working directory might differ from line-endings of the respective files in the upstream repository.
Diffstat (limited to 'gcc/testsuite/gcc.dg/vmx/3c-01a.c')
-rw-r--r-- gcc/testsuite/gcc.dg/vmx/3c-01a.c 1450
1 files changed, 1450 insertions, 0 deletions
diff --git a/gcc/testsuite/gcc.dg/vmx/3c-01a.c b/gcc/testsuite/gcc.dg/vmx/3c-01a.c
new file mode 100644
index 000000000..2499ca665
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/3c-01a.c
@@ -0,0 +1,1450 @@
+/* { dg-do compile } */
+/* Compile-only AltiVec test (generated): exercises vec_ld/vec_lde/
+   vec_ldl/vec_st/vec_ste/vec_stl/vec_lvsl/vec_lvsr and the vec_dst*
+   stream intrinsics with pointer arguments of every cv-qualified
+   scalar and vector type.  The typedef names _1.._124 below encode
+   those cv/type combinations and are referenced throughout the test
+   body, so their spellings and numbering must not change.  */
+#include <altivec.h>
+/* Typedef ladder: each group of four declares, in fixed order,
+   const volatile T, const T, volatile T, and plain T for one base
+   type T.  Some base types appear in more than one group (e.g.
+   _13.._16 and _37.._40 are both 'unsigned'); the duplication is
+   intentional -- the generator emits one group per distinct textual
+   spelling of the type ('unsigned', 'unsigned int', ...).  */
+typedef const volatile unsigned int _1;
+typedef const unsigned int _2;
+typedef volatile unsigned int _3;
+typedef unsigned int _4;
+typedef const volatile vector bool short _5;
+typedef const vector bool short _6;
+typedef volatile vector bool short _7;
+typedef vector bool short _8;
+typedef const volatile signed short _9;
+typedef const signed short _10;
+typedef volatile signed short _11;
+typedef signed short _12;
+typedef const volatile unsigned _13;
+typedef const unsigned _14;
+typedef volatile unsigned _15;
+typedef unsigned _16;
+typedef const volatile signed short int _17;
+typedef const signed short int _18;
+typedef volatile signed short int _19;
+typedef signed short int _20;
+typedef const volatile unsigned short int _21;
+typedef const unsigned short int _22;
+typedef volatile unsigned short int _23;
+typedef unsigned short int _24;
+typedef const volatile vector pixel _25;
+typedef const vector pixel _26;
+typedef volatile vector pixel _27;
+typedef vector pixel _28;
+typedef const volatile vector bool int _29;
+typedef const vector bool int _30;
+typedef volatile vector bool int _31;
+typedef vector bool int _32;
+typedef const volatile vector signed char _33;
+typedef const vector signed char _34;
+typedef volatile vector signed char _35;
+typedef vector signed char _36;
+typedef const volatile unsigned _37;
+typedef const unsigned _38;
+typedef volatile unsigned _39;
+typedef unsigned _40;
+typedef const volatile signed int _41;
+typedef const signed int _42;
+typedef volatile signed int _43;
+typedef signed int _44;
+typedef const volatile vector float _45;
+typedef const vector float _46;
+typedef volatile vector float _47;
+typedef vector float _48;
+typedef const volatile vector signed short _49;
+typedef const vector signed short _50;
+typedef volatile vector signed short _51;
+typedef vector signed short _52;
+typedef const volatile unsigned char _53;
+typedef const unsigned char _54;
+typedef volatile unsigned char _55;
+typedef unsigned char _56;
+typedef const volatile signed int _57;
+typedef const signed int _58;
+typedef volatile signed int _59;
+typedef signed int _60;
+typedef const volatile unsigned int _61;
+typedef const unsigned int _62;
+typedef volatile unsigned int _63;
+typedef unsigned int _64;
+typedef const volatile unsigned short _65;
+typedef const unsigned short _66;
+typedef volatile unsigned short _67;
+typedef unsigned short _68;
+typedef const volatile short _69;
+typedef const short _70;
+typedef volatile short _71;
+typedef short _72;
+typedef const volatile int _73;
+typedef const int _74;
+typedef volatile int _75;
+typedef int _76;
+typedef const volatile vector unsigned short _77;
+typedef const vector unsigned short _78;
+typedef volatile vector unsigned short _79;
+typedef vector unsigned short _80;
+typedef const volatile vector bool char _81;
+typedef const vector bool char _82;
+typedef volatile vector bool char _83;
+typedef vector bool char _84;
+typedef const volatile signed _85;
+typedef const signed _86;
+typedef volatile signed _87;
+typedef signed _88;
+typedef const volatile vector signed int _89;
+typedef const vector signed int _90;
+typedef volatile vector signed int _91;
+typedef vector signed int _92;
+typedef const volatile vector unsigned int _93;
+typedef const vector unsigned int _94;
+typedef volatile vector unsigned int _95;
+typedef vector unsigned int _96;
+typedef const volatile signed _97;
+typedef const signed _98;
+typedef volatile signed _99;
+typedef signed _100;
+typedef const volatile short int _101;
+typedef const short int _102;
+typedef volatile short int _103;
+typedef short int _104;
+typedef const volatile int _105;
+typedef const int _106;
+typedef volatile int _107;
+typedef int _108;
+typedef const volatile int _109;
+typedef const int _110;
+typedef volatile int _111;
+typedef int _112;
+typedef const volatile vector unsigned char _113;
+typedef const vector unsigned char _114;
+typedef volatile vector unsigned char _115;
+typedef vector unsigned char _116;
+typedef const volatile signed char _117;
+typedef const signed char _118;
+typedef volatile signed char _119;
+typedef signed char _120;
+typedef const volatile float _121;
+typedef const float _122;
+typedef volatile float _123;
+typedef float _124;
+
+/* One global per AltiVec vector kind; the test body assigns the
+   results of the load intrinsics to these and passes them to the
+   store intrinsics, so each result type is type-checked.  */
+vector unsigned char u8;
+vector signed char s8;
+vector bool char b8;
+vector unsigned short u16;
+vector signed short s16;
+vector bool short b16;
+vector unsigned int u32;
+vector signed int s32;
+vector bool int b32;
+vector float f32;
+vector pixel p16;
+
+void f(void *p)
+{
+ u8 = vec_lvsl(1,(const volatile unsigned int *)p);
+ u8 = vec_lvsl(1,(_1 *)p);
+ u8 = vec_lvsr(1,(const volatile unsigned int *)p);
+ u8 = vec_lvsr(1,(_1 *)p);
+ u8 = vec_lvsl(1,(const unsigned int *)p);
+ u8 = vec_lvsl(1,(_2 *)p);
+ u8 = vec_lvsr(1,(const unsigned int *)p);
+ u8 = vec_lvsr(1,(_2 *)p);
+ u32 = vec_ld(1,(const unsigned int *)p);
+ u32 = vec_ld(1,(_2 *)p);
+ u32 = vec_lde(1,(const unsigned int *)p);
+ u32 = vec_lde(1,(_2 *)p);
+ u32 = vec_ldl(1,(const unsigned int *)p);
+ u32 = vec_ldl(1,(_2 *)p);
+ vec_dst((const unsigned int *)p,1,1);
+ vec_dstst((const unsigned int *)p,1,1);
+ vec_dststt((const unsigned int *)p,1,1);
+ vec_dstt((const unsigned int *)p,1,1);
+ vec_dst((_2 *)p,1,1);
+ vec_dstst((_2 *)p,1,1);
+ vec_dststt((_2 *)p,1,1);
+ vec_dstt((_2 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile unsigned int *)p);
+ u8 = vec_lvsl(1,(_3 *)p);
+ u8 = vec_lvsr(1,( volatile unsigned int *)p);
+ u8 = vec_lvsr(1,(_3 *)p);
+ u8 = vec_lvsl(1,( unsigned int *)p);
+ u8 = vec_lvsl(1,(_4 *)p);
+ u8 = vec_lvsr(1,( unsigned int *)p);
+ u8 = vec_lvsr(1,(_4 *)p);
+ u32 = vec_ld(1,( unsigned int *)p);
+ u32 = vec_ld(1,(_4 *)p);
+ u32 = vec_lde(1,( unsigned int *)p);
+ u32 = vec_lde(1,(_4 *)p);
+ u32 = vec_ldl(1,( unsigned int *)p);
+ u32 = vec_ldl(1,(_4 *)p);
+ vec_dst(( unsigned int *)p,1,1);
+ vec_dstst(( unsigned int *)p,1,1);
+ vec_dststt(( unsigned int *)p,1,1);
+ vec_dstt(( unsigned int *)p,1,1);
+ vec_dst((_4 *)p,1,1);
+ vec_dstst((_4 *)p,1,1);
+ vec_dststt((_4 *)p,1,1);
+ vec_dstt((_4 *)p,1,1);
+ vec_st(u32,1,( unsigned int *)p);
+ vec_st(u32,1,(_4 *)p);
+ vec_ste(u32,1,( unsigned int *)p);
+ vec_ste(u32,1,(_4 *)p);
+ vec_stl(u32,1,( unsigned int *)p);
+ vec_stl(u32,1,(_4 *)p);
+ b16 = vec_ld(1,(const vector bool short *)p);
+ b16 = vec_ld(1,(_6 *)p);
+ b16 = vec_ldl(1,(const vector bool short *)p);
+ b16 = vec_ldl(1,(_6 *)p);
+ vec_dst((const vector bool short *)p,1,1);
+ vec_dstst((const vector bool short *)p,1,1);
+ vec_dststt((const vector bool short *)p,1,1);
+ vec_dstt((const vector bool short *)p,1,1);
+ vec_dst((_6 *)p,1,1);
+ vec_dstst((_6 *)p,1,1);
+ vec_dststt((_6 *)p,1,1);
+ vec_dstt((_6 *)p,1,1);
+ b16 = vec_ld(1,( vector bool short *)p);
+ b16 = vec_ld(1,(_8 *)p);
+ b16 = vec_ldl(1,( vector bool short *)p);
+ b16 = vec_ldl(1,(_8 *)p);
+ vec_dst(( vector bool short *)p,1,1);
+ vec_dstst(( vector bool short *)p,1,1);
+ vec_dststt(( vector bool short *)p,1,1);
+ vec_dstt(( vector bool short *)p,1,1);
+ vec_dst((_8 *)p,1,1);
+ vec_dstst((_8 *)p,1,1);
+ vec_dststt((_8 *)p,1,1);
+ vec_dstt((_8 *)p,1,1);
+ vec_st(b16,1,( vector bool short *)p);
+ vec_st(b16,1,(_8 *)p);
+ vec_stl(b16,1,( vector bool short *)p);
+ vec_stl(b16,1,(_8 *)p);
+ u8 = vec_lvsl(1,(const volatile signed short *)p);
+ u8 = vec_lvsl(1,(_9 *)p);
+ u8 = vec_lvsr(1,(const volatile signed short *)p);
+ u8 = vec_lvsr(1,(_9 *)p);
+ u8 = vec_lvsl(1,(const signed short *)p);
+ u8 = vec_lvsl(1,(_10 *)p);
+ u8 = vec_lvsr(1,(const signed short *)p);
+ u8 = vec_lvsr(1,(_10 *)p);
+ s16 = vec_ld(1,(const signed short *)p);
+ s16 = vec_ld(1,(_10 *)p);
+ s16 = vec_lde(1,(const signed short *)p);
+ s16 = vec_lde(1,(_10 *)p);
+ s16 = vec_ldl(1,(const signed short *)p);
+ s16 = vec_ldl(1,(_10 *)p);
+ vec_dst((const signed short *)p,1,1);
+ vec_dstst((const signed short *)p,1,1);
+ vec_dststt((const signed short *)p,1,1);
+ vec_dstt((const signed short *)p,1,1);
+ vec_dst((_10 *)p,1,1);
+ vec_dstst((_10 *)p,1,1);
+ vec_dststt((_10 *)p,1,1);
+ vec_dstt((_10 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile signed short *)p);
+ u8 = vec_lvsl(1,(_11 *)p);
+ u8 = vec_lvsr(1,( volatile signed short *)p);
+ u8 = vec_lvsr(1,(_11 *)p);
+ u8 = vec_lvsl(1,( signed short *)p);
+ u8 = vec_lvsl(1,(_12 *)p);
+ u8 = vec_lvsr(1,( signed short *)p);
+ u8 = vec_lvsr(1,(_12 *)p);
+ s16 = vec_ld(1,( signed short *)p);
+ s16 = vec_ld(1,(_12 *)p);
+ s16 = vec_lde(1,( signed short *)p);
+ s16 = vec_lde(1,(_12 *)p);
+ s16 = vec_ldl(1,( signed short *)p);
+ s16 = vec_ldl(1,(_12 *)p);
+ vec_dst(( signed short *)p,1,1);
+ vec_dstst(( signed short *)p,1,1);
+ vec_dststt(( signed short *)p,1,1);
+ vec_dstt(( signed short *)p,1,1);
+ vec_dst((_12 *)p,1,1);
+ vec_dstst((_12 *)p,1,1);
+ vec_dststt((_12 *)p,1,1);
+ vec_dstt((_12 *)p,1,1);
+ vec_st(s16,1,( signed short *)p);
+ vec_st(s16,1,(_12 *)p);
+ vec_ste(s16,1,( signed short *)p);
+ vec_ste(s16,1,(_12 *)p);
+ vec_stl(s16,1,( signed short *)p);
+ vec_stl(s16,1,(_12 *)p);
+ u8 = vec_lvsl(1,(const volatile unsigned *)p);
+ u8 = vec_lvsl(1,(_13 *)p);
+ u8 = vec_lvsr(1,(const volatile unsigned *)p);
+ u8 = vec_lvsr(1,(_13 *)p);
+ u8 = vec_lvsl(1,(const unsigned *)p);
+ u8 = vec_lvsl(1,(_14 *)p);
+ u8 = vec_lvsr(1,(const unsigned *)p);
+ u8 = vec_lvsr(1,(_14 *)p);
+ u32 = vec_ld(1,(const unsigned *)p);
+ u32 = vec_ld(1,(_14 *)p);
+ u32 = vec_lde(1,(const unsigned *)p);
+ u32 = vec_lde(1,(_14 *)p);
+ u32 = vec_ldl(1,(const unsigned *)p);
+ u32 = vec_ldl(1,(_14 *)p);
+ vec_dst((const unsigned *)p,1,1);
+ vec_dstst((const unsigned *)p,1,1);
+ vec_dststt((const unsigned *)p,1,1);
+ vec_dstt((const unsigned *)p,1,1);
+ vec_dst((_14 *)p,1,1);
+ vec_dstst((_14 *)p,1,1);
+ vec_dststt((_14 *)p,1,1);
+ vec_dstt((_14 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile unsigned *)p);
+ u8 = vec_lvsl(1,(_15 *)p);
+ u8 = vec_lvsr(1,( volatile unsigned *)p);
+ u8 = vec_lvsr(1,(_15 *)p);
+ u8 = vec_lvsl(1,( unsigned *)p);
+ u8 = vec_lvsl(1,(_16 *)p);
+ u8 = vec_lvsr(1,( unsigned *)p);
+ u8 = vec_lvsr(1,(_16 *)p);
+ u32 = vec_ld(1,( unsigned *)p);
+ u32 = vec_ld(1,(_16 *)p);
+ u32 = vec_lde(1,( unsigned *)p);
+ u32 = vec_lde(1,(_16 *)p);
+ u32 = vec_ldl(1,( unsigned *)p);
+ u32 = vec_ldl(1,(_16 *)p);
+ vec_dst(( unsigned *)p,1,1);
+ vec_dstst(( unsigned *)p,1,1);
+ vec_dststt(( unsigned *)p,1,1);
+ vec_dstt(( unsigned *)p,1,1);
+ vec_dst((_16 *)p,1,1);
+ vec_dstst((_16 *)p,1,1);
+ vec_dststt((_16 *)p,1,1);
+ vec_dstt((_16 *)p,1,1);
+ vec_st(u32,1,( unsigned *)p);
+ vec_st(u32,1,(_16 *)p);
+ vec_ste(u32,1,( unsigned *)p);
+ vec_ste(u32,1,(_16 *)p);
+ vec_stl(u32,1,( unsigned *)p);
+ vec_stl(u32,1,(_16 *)p);
+ u8 = vec_lvsl(1,(const volatile signed short int *)p);
+ u8 = vec_lvsl(1,(_17 *)p);
+ u8 = vec_lvsr(1,(const volatile signed short int *)p);
+ u8 = vec_lvsr(1,(_17 *)p);
+ u8 = vec_lvsl(1,(const signed short int *)p);
+ u8 = vec_lvsl(1,(_18 *)p);
+ u8 = vec_lvsr(1,(const signed short int *)p);
+ u8 = vec_lvsr(1,(_18 *)p);
+ s16 = vec_ld(1,(const signed short int *)p);
+ s16 = vec_ld(1,(_18 *)p);
+ s16 = vec_lde(1,(const signed short int *)p);
+ s16 = vec_lde(1,(_18 *)p);
+ s16 = vec_ldl(1,(const signed short int *)p);
+ s16 = vec_ldl(1,(_18 *)p);
+ vec_dst((const signed short int *)p,1,1);
+ vec_dstst((const signed short int *)p,1,1);
+ vec_dststt((const signed short int *)p,1,1);
+ vec_dstt((const signed short int *)p,1,1);
+ vec_dst((_18 *)p,1,1);
+ vec_dstst((_18 *)p,1,1);
+ vec_dststt((_18 *)p,1,1);
+ vec_dstt((_18 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile signed short int *)p);
+ u8 = vec_lvsl(1,(_19 *)p);
+ u8 = vec_lvsr(1,( volatile signed short int *)p);
+ u8 = vec_lvsr(1,(_19 *)p);
+ u8 = vec_lvsl(1,( signed short int *)p);
+ u8 = vec_lvsl(1,(_20 *)p);
+ u8 = vec_lvsr(1,( signed short int *)p);
+ u8 = vec_lvsr(1,(_20 *)p);
+ s16 = vec_ld(1,( signed short int *)p);
+ s16 = vec_ld(1,(_20 *)p);
+ s16 = vec_lde(1,( signed short int *)p);
+ s16 = vec_lde(1,(_20 *)p);
+ s16 = vec_ldl(1,( signed short int *)p);
+ s16 = vec_ldl(1,(_20 *)p);
+ vec_dst(( signed short int *)p,1,1);
+ vec_dstst(( signed short int *)p,1,1);
+ vec_dststt(( signed short int *)p,1,1);
+ vec_dstt(( signed short int *)p,1,1);
+ vec_dst((_20 *)p,1,1);
+ vec_dstst((_20 *)p,1,1);
+ vec_dststt((_20 *)p,1,1);
+ vec_dstt((_20 *)p,1,1);
+ vec_st(s16,1,( signed short int *)p);
+ vec_st(s16,1,(_20 *)p);
+ vec_ste(s16,1,( signed short int *)p);
+ vec_ste(s16,1,(_20 *)p);
+ vec_stl(s16,1,( signed short int *)p);
+ vec_stl(s16,1,(_20 *)p);
+ u8 = vec_lvsl(1,(const volatile unsigned short int *)p);
+ u8 = vec_lvsl(1,(_21 *)p);
+ u8 = vec_lvsr(1,(const volatile unsigned short int *)p);
+ u8 = vec_lvsr(1,(_21 *)p);
+ u8 = vec_lvsl(1,(const unsigned short int *)p);
+ u8 = vec_lvsl(1,(_22 *)p);
+ u8 = vec_lvsr(1,(const unsigned short int *)p);
+ u8 = vec_lvsr(1,(_22 *)p);
+ u16 = vec_ld(1,(const unsigned short int *)p);
+ u16 = vec_ld(1,(_22 *)p);
+ u16 = vec_lde(1,(const unsigned short int *)p);
+ u16 = vec_lde(1,(_22 *)p);
+ u16 = vec_ldl(1,(const unsigned short int *)p);
+ u16 = vec_ldl(1,(_22 *)p);
+ vec_dst((const unsigned short int *)p,1,1);
+ vec_dstst((const unsigned short int *)p,1,1);
+ vec_dststt((const unsigned short int *)p,1,1);
+ vec_dstt((const unsigned short int *)p,1,1);
+ vec_dst((_22 *)p,1,1);
+ vec_dstst((_22 *)p,1,1);
+ vec_dststt((_22 *)p,1,1);
+ vec_dstt((_22 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile unsigned short int *)p);
+ u8 = vec_lvsl(1,(_23 *)p);
+ u8 = vec_lvsr(1,( volatile unsigned short int *)p);
+ u8 = vec_lvsr(1,(_23 *)p);
+ u8 = vec_lvsl(1,( unsigned short int *)p);
+ u8 = vec_lvsl(1,(_24 *)p);
+ u8 = vec_lvsr(1,( unsigned short int *)p);
+ u8 = vec_lvsr(1,(_24 *)p);
+ u16 = vec_ld(1,( unsigned short int *)p);
+ u16 = vec_ld(1,(_24 *)p);
+ u16 = vec_lde(1,( unsigned short int *)p);
+ u16 = vec_lde(1,(_24 *)p);
+ u16 = vec_ldl(1,( unsigned short int *)p);
+ u16 = vec_ldl(1,(_24 *)p);
+ vec_dst(( unsigned short int *)p,1,1);
+ vec_dstst(( unsigned short int *)p,1,1);
+ vec_dststt(( unsigned short int *)p,1,1);
+ vec_dstt(( unsigned short int *)p,1,1);
+ vec_dst((_24 *)p,1,1);
+ vec_dstst((_24 *)p,1,1);
+ vec_dststt((_24 *)p,1,1);
+ vec_dstt((_24 *)p,1,1);
+ vec_st(u16,1,( unsigned short int *)p);
+ vec_st(u16,1,(_24 *)p);
+ vec_ste(u16,1,( unsigned short int *)p);
+ vec_ste(u16,1,(_24 *)p);
+ vec_stl(u16,1,( unsigned short int *)p);
+ vec_stl(u16,1,(_24 *)p);
+ p16 = vec_ld(1,(const vector pixel *)p);
+ p16 = vec_ld(1,(_26 *)p);
+ p16 = vec_ldl(1,(const vector pixel *)p);
+ p16 = vec_ldl(1,(_26 *)p);
+ vec_dst((const vector pixel *)p,1,1);
+ vec_dstst((const vector pixel *)p,1,1);
+ vec_dststt((const vector pixel *)p,1,1);
+ vec_dstt((const vector pixel *)p,1,1);
+ vec_dst((_26 *)p,1,1);
+ vec_dstst((_26 *)p,1,1);
+ vec_dststt((_26 *)p,1,1);
+ vec_dstt((_26 *)p,1,1);
+ p16 = vec_ld(1,( vector pixel *)p);
+ p16 = vec_ld(1,(_28 *)p);
+ p16 = vec_ldl(1,( vector pixel *)p);
+ p16 = vec_ldl(1,(_28 *)p);
+ vec_dst(( vector pixel *)p,1,1);
+ vec_dstst(( vector pixel *)p,1,1);
+ vec_dststt(( vector pixel *)p,1,1);
+ vec_dstt(( vector pixel *)p,1,1);
+ vec_dst((_28 *)p,1,1);
+ vec_dstst((_28 *)p,1,1);
+ vec_dststt((_28 *)p,1,1);
+ vec_dstt((_28 *)p,1,1);
+ vec_st(p16,1,( vector pixel *)p);
+ vec_st(p16,1,(_28 *)p);
+ vec_stl(p16,1,( vector pixel *)p);
+ vec_stl(p16,1,(_28 *)p);
+ b32 = vec_ld(1,(const vector bool int *)p);
+ b32 = vec_ld(1,(_30 *)p);
+ b32 = vec_ldl(1,(const vector bool int *)p);
+ b32 = vec_ldl(1,(_30 *)p);
+ vec_dst((const vector bool int *)p,1,1);
+ vec_dstst((const vector bool int *)p,1,1);
+ vec_dststt((const vector bool int *)p,1,1);
+ vec_dstt((const vector bool int *)p,1,1);
+ vec_dst((_30 *)p,1,1);
+ vec_dstst((_30 *)p,1,1);
+ vec_dststt((_30 *)p,1,1);
+ vec_dstt((_30 *)p,1,1);
+ b32 = vec_ld(1,( vector bool int *)p);
+ b32 = vec_ld(1,(_32 *)p);
+ b32 = vec_ldl(1,( vector bool int *)p);
+ b32 = vec_ldl(1,(_32 *)p);
+ vec_dst(( vector bool int *)p,1,1);
+ vec_dstst(( vector bool int *)p,1,1);
+ vec_dststt(( vector bool int *)p,1,1);
+ vec_dstt(( vector bool int *)p,1,1);
+ vec_dst((_32 *)p,1,1);
+ vec_dstst((_32 *)p,1,1);
+ vec_dststt((_32 *)p,1,1);
+ vec_dstt((_32 *)p,1,1);
+ vec_st(b32,1,( vector bool int *)p);
+ vec_st(b32,1,(_32 *)p);
+ vec_stl(b32,1,( vector bool int *)p);
+ vec_stl(b32,1,(_32 *)p);
+ s8 = vec_ld(1,(const vector signed char *)p);
+ s8 = vec_ld(1,(_34 *)p);
+ s8 = vec_ldl(1,(const vector signed char *)p);
+ s8 = vec_ldl(1,(_34 *)p);
+ vec_dst((const vector signed char *)p,1,1);
+ vec_dstst((const vector signed char *)p,1,1);
+ vec_dststt((const vector signed char *)p,1,1);
+ vec_dstt((const vector signed char *)p,1,1);
+ vec_dst((_34 *)p,1,1);
+ vec_dstst((_34 *)p,1,1);
+ vec_dststt((_34 *)p,1,1);
+ vec_dstt((_34 *)p,1,1);
+ s8 = vec_ld(1,( vector signed char *)p);
+ s8 = vec_ld(1,(_36 *)p);
+ s8 = vec_ldl(1,( vector signed char *)p);
+ s8 = vec_ldl(1,(_36 *)p);
+ vec_dst(( vector signed char *)p,1,1);
+ vec_dstst(( vector signed char *)p,1,1);
+ vec_dststt(( vector signed char *)p,1,1);
+ vec_dstt(( vector signed char *)p,1,1);
+ vec_dst((_36 *)p,1,1);
+ vec_dstst((_36 *)p,1,1);
+ vec_dststt((_36 *)p,1,1);
+ vec_dstt((_36 *)p,1,1);
+ vec_st(s8,1,( vector signed char *)p);
+ vec_st(s8,1,(_36 *)p);
+ vec_stl(s8,1,( vector signed char *)p);
+ vec_stl(s8,1,(_36 *)p);
+ u8 = vec_lvsl(1,(const volatile unsigned *)p);
+ u8 = vec_lvsl(1,(_37 *)p);
+ u8 = vec_lvsr(1,(const volatile unsigned *)p);
+ u8 = vec_lvsr(1,(_37 *)p);
+ u8 = vec_lvsl(1,(const unsigned *)p);
+ u8 = vec_lvsl(1,(_38 *)p);
+ u8 = vec_lvsr(1,(const unsigned *)p);
+ u8 = vec_lvsr(1,(_38 *)p);
+ u32 = vec_ld(1,(const unsigned *)p);
+ u32 = vec_ld(1,(_38 *)p);
+ u32 = vec_lde(1,(const unsigned *)p);
+ u32 = vec_lde(1,(_38 *)p);
+ u32 = vec_ldl(1,(const unsigned *)p);
+ u32 = vec_ldl(1,(_38 *)p);
+ vec_dst((const unsigned *)p,1,1);
+ vec_dstst((const unsigned *)p,1,1);
+ vec_dststt((const unsigned *)p,1,1);
+ vec_dstt((const unsigned *)p,1,1);
+ vec_dst((_38 *)p,1,1);
+ vec_dstst((_38 *)p,1,1);
+ vec_dststt((_38 *)p,1,1);
+ vec_dstt((_38 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile unsigned *)p);
+ u8 = vec_lvsl(1,(_39 *)p);
+ u8 = vec_lvsr(1,( volatile unsigned *)p);
+ u8 = vec_lvsr(1,(_39 *)p);
+ u8 = vec_lvsl(1,( unsigned *)p);
+ u8 = vec_lvsl(1,(_40 *)p);
+ u8 = vec_lvsr(1,( unsigned *)p);
+ u8 = vec_lvsr(1,(_40 *)p);
+ u32 = vec_ld(1,( unsigned *)p);
+ u32 = vec_ld(1,(_40 *)p);
+ u32 = vec_lde(1,( unsigned *)p);
+ u32 = vec_lde(1,(_40 *)p);
+ u32 = vec_ldl(1,( unsigned *)p);
+ u32 = vec_ldl(1,(_40 *)p);
+ vec_dst(( unsigned *)p,1,1);
+ vec_dstst(( unsigned *)p,1,1);
+ vec_dststt(( unsigned *)p,1,1);
+ vec_dstt(( unsigned *)p,1,1);
+ vec_dst((_40 *)p,1,1);
+ vec_dstst((_40 *)p,1,1);
+ vec_dststt((_40 *)p,1,1);
+ vec_dstt((_40 *)p,1,1);
+ vec_st(u32,1,( unsigned *)p);
+ vec_st(u32,1,(_40 *)p);
+ vec_ste(u32,1,( unsigned *)p);
+ vec_ste(u32,1,(_40 *)p);
+ vec_stl(u32,1,( unsigned *)p);
+ vec_stl(u32,1,(_40 *)p);
+ u8 = vec_lvsl(1,(const volatile signed int *)p);
+ u8 = vec_lvsl(1,(_41 *)p);
+ u8 = vec_lvsr(1,(const volatile signed int *)p);
+ u8 = vec_lvsr(1,(_41 *)p);
+ u8 = vec_lvsl(1,(const signed int *)p);
+ u8 = vec_lvsl(1,(_42 *)p);
+ u8 = vec_lvsr(1,(const signed int *)p);
+ u8 = vec_lvsr(1,(_42 *)p);
+ s32 = vec_ld(1,(const signed int *)p);
+ s32 = vec_ld(1,(_42 *)p);
+ s32 = vec_lde(1,(const signed int *)p);
+ s32 = vec_lde(1,(_42 *)p);
+ s32 = vec_ldl(1,(const signed int *)p);
+ s32 = vec_ldl(1,(_42 *)p);
+ vec_dst((const signed int *)p,1,1);
+ vec_dstst((const signed int *)p,1,1);
+ vec_dststt((const signed int *)p,1,1);
+ vec_dstt((const signed int *)p,1,1);
+ vec_dst((_42 *)p,1,1);
+ vec_dstst((_42 *)p,1,1);
+ vec_dststt((_42 *)p,1,1);
+ vec_dstt((_42 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile signed int *)p);
+ u8 = vec_lvsl(1,(_43 *)p);
+ u8 = vec_lvsr(1,( volatile signed int *)p);
+ u8 = vec_lvsr(1,(_43 *)p);
+ u8 = vec_lvsl(1,( signed int *)p);
+ u8 = vec_lvsl(1,(_44 *)p);
+ u8 = vec_lvsr(1,( signed int *)p);
+ u8 = vec_lvsr(1,(_44 *)p);
+ s32 = vec_ld(1,( signed int *)p);
+ s32 = vec_ld(1,(_44 *)p);
+ s32 = vec_lde(1,( signed int *)p);
+ s32 = vec_lde(1,(_44 *)p);
+ s32 = vec_ldl(1,( signed int *)p);
+ s32 = vec_ldl(1,(_44 *)p);
+ vec_dst(( signed int *)p,1,1);
+ vec_dstst(( signed int *)p,1,1);
+ vec_dststt(( signed int *)p,1,1);
+ vec_dstt(( signed int *)p,1,1);
+ vec_dst((_44 *)p,1,1);
+ vec_dstst((_44 *)p,1,1);
+ vec_dststt((_44 *)p,1,1);
+ vec_dstt((_44 *)p,1,1);
+ vec_st(s32,1,( signed int *)p);
+ vec_st(s32,1,(_44 *)p);
+ vec_ste(s32,1,( signed int *)p);
+ vec_ste(s32,1,(_44 *)p);
+ vec_stl(s32,1,( signed int *)p);
+ vec_stl(s32,1,(_44 *)p);
+ f32 = vec_ld(1,(const vector float *)p);
+ f32 = vec_ld(1,(_46 *)p);
+ f32 = vec_ldl(1,(const vector float *)p);
+ f32 = vec_ldl(1,(_46 *)p);
+ vec_dst((const vector float *)p,1,1);
+ vec_dstst((const vector float *)p,1,1);
+ vec_dststt((const vector float *)p,1,1);
+ vec_dstt((const vector float *)p,1,1);
+ vec_dst((_46 *)p,1,1);
+ vec_dstst((_46 *)p,1,1);
+ vec_dststt((_46 *)p,1,1);
+ vec_dstt((_46 *)p,1,1);
+ f32 = vec_ld(1,( vector float *)p);
+ f32 = vec_ld(1,(_48 *)p);
+ f32 = vec_ldl(1,( vector float *)p);
+ f32 = vec_ldl(1,(_48 *)p);
+ vec_dst(( vector float *)p,1,1);
+ vec_dstst(( vector float *)p,1,1);
+ vec_dststt(( vector float *)p,1,1);
+ vec_dstt(( vector float *)p,1,1);
+ vec_dst((_48 *)p,1,1);
+ vec_dstst((_48 *)p,1,1);
+ vec_dststt((_48 *)p,1,1);
+ vec_dstt((_48 *)p,1,1);
+ vec_st(f32,1,( vector float *)p);
+ vec_st(f32,1,(_48 *)p);
+ vec_stl(f32,1,( vector float *)p);
+ vec_stl(f32,1,(_48 *)p);
+ s16 = vec_ld(1,(const vector signed short *)p);
+ s16 = vec_ld(1,(_50 *)p);
+ s16 = vec_ldl(1,(const vector signed short *)p);
+ s16 = vec_ldl(1,(_50 *)p);
+ vec_dst((const vector signed short *)p,1,1);
+ vec_dstst((const vector signed short *)p,1,1);
+ vec_dststt((const vector signed short *)p,1,1);
+ vec_dstt((const vector signed short *)p,1,1);
+ vec_dst((_50 *)p,1,1);
+ vec_dstst((_50 *)p,1,1);
+ vec_dststt((_50 *)p,1,1);
+ vec_dstt((_50 *)p,1,1);
+ s16 = vec_ld(1,( vector signed short *)p);
+ s16 = vec_ld(1,(_52 *)p);
+ s16 = vec_ldl(1,( vector signed short *)p);
+ s16 = vec_ldl(1,(_52 *)p);
+ vec_dst(( vector signed short *)p,1,1);
+ vec_dstst(( vector signed short *)p,1,1);
+ vec_dststt(( vector signed short *)p,1,1);
+ vec_dstt(( vector signed short *)p,1,1);
+ vec_dst((_52 *)p,1,1);
+ vec_dstst((_52 *)p,1,1);
+ vec_dststt((_52 *)p,1,1);
+ vec_dstt((_52 *)p,1,1);
+ vec_st(s16,1,( vector signed short *)p);
+ vec_st(s16,1,(_52 *)p);
+ vec_stl(s16,1,( vector signed short *)p);
+ vec_stl(s16,1,(_52 *)p);
+ u8 = vec_lvsl(1,(const volatile unsigned char *)p);
+ u8 = vec_lvsl(1,(_53 *)p);
+ u8 = vec_lvsr(1,(const volatile unsigned char *)p);
+ u8 = vec_lvsr(1,(_53 *)p);
+ u8 = vec_lvsl(1,(const unsigned char *)p);
+ u8 = vec_lvsl(1,(_54 *)p);
+ u8 = vec_lvsr(1,(const unsigned char *)p);
+ u8 = vec_lvsr(1,(_54 *)p);
+ u8 = vec_ld(1,(const unsigned char *)p);
+ u8 = vec_ld(1,(_54 *)p);
+ u8 = vec_lde(1,(const unsigned char *)p);
+ u8 = vec_lde(1,(_54 *)p);
+ u8 = vec_ldl(1,(const unsigned char *)p);
+ u8 = vec_ldl(1,(_54 *)p);
+ vec_dst((const unsigned char *)p,1,1);
+ vec_dstst((const unsigned char *)p,1,1);
+ vec_dststt((const unsigned char *)p,1,1);
+ vec_dstt((const unsigned char *)p,1,1);
+ vec_dst((_54 *)p,1,1);
+ vec_dstst((_54 *)p,1,1);
+ vec_dststt((_54 *)p,1,1);
+ vec_dstt((_54 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile unsigned char *)p);
+ u8 = vec_lvsl(1,(_55 *)p);
+ u8 = vec_lvsr(1,( volatile unsigned char *)p);
+ u8 = vec_lvsr(1,(_55 *)p);
+ u8 = vec_lvsl(1,( unsigned char *)p);
+ u8 = vec_lvsl(1,(_56 *)p);
+ u8 = vec_lvsr(1,( unsigned char *)p);
+ u8 = vec_lvsr(1,(_56 *)p);
+ u8 = vec_ld(1,( unsigned char *)p);
+ u8 = vec_ld(1,(_56 *)p);
+ u8 = vec_lde(1,( unsigned char *)p);
+ u8 = vec_lde(1,(_56 *)p);
+ u8 = vec_ldl(1,( unsigned char *)p);
+ u8 = vec_ldl(1,(_56 *)p);
+ vec_dst(( unsigned char *)p,1,1);
+ vec_dstst(( unsigned char *)p,1,1);
+ vec_dststt(( unsigned char *)p,1,1);
+ vec_dstt(( unsigned char *)p,1,1);
+ vec_dst((_56 *)p,1,1);
+ vec_dstst((_56 *)p,1,1);
+ vec_dststt((_56 *)p,1,1);
+ vec_dstt((_56 *)p,1,1);
+ vec_st(u8,1,( unsigned char *)p);
+ vec_st(u8,1,(_56 *)p);
+ vec_ste(u8,1,( unsigned char *)p);
+ vec_ste(u8,1,(_56 *)p);
+ vec_stl(u8,1,( unsigned char *)p);
+ vec_stl(u8,1,(_56 *)p);
+ u8 = vec_lvsl(1,(const volatile signed int *)p);
+ u8 = vec_lvsl(1,(_57 *)p);
+ u8 = vec_lvsr(1,(const volatile signed int *)p);
+ u8 = vec_lvsr(1,(_57 *)p);
+ u8 = vec_lvsl(1,(const signed int *)p);
+ u8 = vec_lvsl(1,(_58 *)p);
+ u8 = vec_lvsr(1,(const signed int *)p);
+ u8 = vec_lvsr(1,(_58 *)p);
+ s32 = vec_ld(1,(const signed int *)p);
+ s32 = vec_ld(1,(_58 *)p);
+ s32 = vec_lde(1,(const signed int *)p);
+ s32 = vec_lde(1,(_58 *)p);
+ s32 = vec_ldl(1,(const signed int *)p);
+ s32 = vec_ldl(1,(_58 *)p);
+ vec_dst((const signed int *)p,1,1);
+ vec_dstst((const signed int *)p,1,1);
+ vec_dststt((const signed int *)p,1,1);
+ vec_dstt((const signed int *)p,1,1);
+ vec_dst((_58 *)p,1,1);
+ vec_dstst((_58 *)p,1,1);
+ vec_dststt((_58 *)p,1,1);
+ vec_dstt((_58 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile signed int *)p);
+ u8 = vec_lvsl(1,(_59 *)p);
+ u8 = vec_lvsr(1,( volatile signed int *)p);
+ u8 = vec_lvsr(1,(_59 *)p);
+ u8 = vec_lvsl(1,( signed int *)p);
+ u8 = vec_lvsl(1,(_60 *)p);
+ u8 = vec_lvsr(1,( signed int *)p);
+ u8 = vec_lvsr(1,(_60 *)p);
+ s32 = vec_ld(1,( signed int *)p);
+ s32 = vec_ld(1,(_60 *)p);
+ s32 = vec_lde(1,( signed int *)p);
+ s32 = vec_lde(1,(_60 *)p);
+ s32 = vec_ldl(1,( signed int *)p);
+ s32 = vec_ldl(1,(_60 *)p);
+ vec_dst(( signed int *)p,1,1);
+ vec_dstst(( signed int *)p,1,1);
+ vec_dststt(( signed int *)p,1,1);
+ vec_dstt(( signed int *)p,1,1);
+ vec_dst((_60 *)p,1,1);
+ vec_dstst((_60 *)p,1,1);
+ vec_dststt((_60 *)p,1,1);
+ vec_dstt((_60 *)p,1,1);
+ vec_st(s32,1,( signed int *)p);
+ vec_st(s32,1,(_60 *)p);
+ vec_ste(s32,1,( signed int *)p);
+ vec_ste(s32,1,(_60 *)p);
+ vec_stl(s32,1,( signed int *)p);
+ vec_stl(s32,1,(_60 *)p);
+ u8 = vec_lvsl(1,(const volatile unsigned int *)p);
+ u8 = vec_lvsl(1,(_61 *)p);
+ u8 = vec_lvsr(1,(const volatile unsigned int *)p);
+ u8 = vec_lvsr(1,(_61 *)p);
+ u8 = vec_lvsl(1,(const unsigned int *)p);
+ u8 = vec_lvsl(1,(_62 *)p);
+ u8 = vec_lvsr(1,(const unsigned int *)p);
+ u8 = vec_lvsr(1,(_62 *)p);
+ u32 = vec_ld(1,(const unsigned int *)p);
+ u32 = vec_ld(1,(_62 *)p);
+ u32 = vec_lde(1,(const unsigned int *)p);
+ u32 = vec_lde(1,(_62 *)p);
+ u32 = vec_ldl(1,(const unsigned int *)p);
+ u32 = vec_ldl(1,(_62 *)p);
+ vec_dst((const unsigned int *)p,1,1);
+ vec_dstst((const unsigned int *)p,1,1);
+ vec_dststt((const unsigned int *)p,1,1);
+ vec_dstt((const unsigned int *)p,1,1);
+ vec_dst((_62 *)p,1,1);
+ vec_dstst((_62 *)p,1,1);
+ vec_dststt((_62 *)p,1,1);
+ vec_dstt((_62 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile unsigned int *)p);
+ u8 = vec_lvsl(1,(_63 *)p);
+ u8 = vec_lvsr(1,( volatile unsigned int *)p);
+ u8 = vec_lvsr(1,(_63 *)p);
+ u8 = vec_lvsl(1,( unsigned int *)p);
+ u8 = vec_lvsl(1,(_64 *)p);
+ u8 = vec_lvsr(1,( unsigned int *)p);
+ u8 = vec_lvsr(1,(_64 *)p);
+ u32 = vec_ld(1,( unsigned int *)p);
+ u32 = vec_ld(1,(_64 *)p);
+ u32 = vec_lde(1,( unsigned int *)p);
+ u32 = vec_lde(1,(_64 *)p);
+ u32 = vec_ldl(1,( unsigned int *)p);
+ u32 = vec_ldl(1,(_64 *)p);
+ vec_dst(( unsigned int *)p,1,1);
+ vec_dstst(( unsigned int *)p,1,1);
+ vec_dststt(( unsigned int *)p,1,1);
+ vec_dstt(( unsigned int *)p,1,1);
+ vec_dst((_64 *)p,1,1);
+ vec_dstst((_64 *)p,1,1);
+ vec_dststt((_64 *)p,1,1);
+ vec_dstt((_64 *)p,1,1);
+ vec_st(u32,1,( unsigned int *)p);
+ vec_st(u32,1,(_64 *)p);
+ vec_ste(u32,1,( unsigned int *)p);
+ vec_ste(u32,1,(_64 *)p);
+ vec_stl(u32,1,( unsigned int *)p);
+ vec_stl(u32,1,(_64 *)p);
+ u8 = vec_lvsl(1,(const volatile unsigned short *)p);
+ u8 = vec_lvsl(1,(_65 *)p);
+ u8 = vec_lvsr(1,(const volatile unsigned short *)p);
+ u8 = vec_lvsr(1,(_65 *)p);
+ u8 = vec_lvsl(1,(const unsigned short *)p);
+ u8 = vec_lvsl(1,(_66 *)p);
+ u8 = vec_lvsr(1,(const unsigned short *)p);
+ u8 = vec_lvsr(1,(_66 *)p);
+ u16 = vec_ld(1,(const unsigned short *)p);
+ u16 = vec_ld(1,(_66 *)p);
+ u16 = vec_lde(1,(const unsigned short *)p);
+ u16 = vec_lde(1,(_66 *)p);
+ u16 = vec_ldl(1,(const unsigned short *)p);
+ u16 = vec_ldl(1,(_66 *)p);
+ vec_dst((const unsigned short *)p,1,1);
+ vec_dstst((const unsigned short *)p,1,1);
+ vec_dststt((const unsigned short *)p,1,1);
+ vec_dstt((const unsigned short *)p,1,1);
+ vec_dst((_66 *)p,1,1);
+ vec_dstst((_66 *)p,1,1);
+ vec_dststt((_66 *)p,1,1);
+ vec_dstt((_66 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile unsigned short *)p);
+ u8 = vec_lvsl(1,(_67 *)p);
+ u8 = vec_lvsr(1,( volatile unsigned short *)p);
+ u8 = vec_lvsr(1,(_67 *)p);
+ u8 = vec_lvsl(1,( unsigned short *)p);
+ u8 = vec_lvsl(1,(_68 *)p);
+ u8 = vec_lvsr(1,( unsigned short *)p);
+ u8 = vec_lvsr(1,(_68 *)p);
+ u16 = vec_ld(1,( unsigned short *)p);
+ u16 = vec_ld(1,(_68 *)p);
+ u16 = vec_lde(1,( unsigned short *)p);
+ u16 = vec_lde(1,(_68 *)p);
+ u16 = vec_ldl(1,( unsigned short *)p);
+ u16 = vec_ldl(1,(_68 *)p);
+ vec_dst(( unsigned short *)p,1,1);
+ vec_dstst(( unsigned short *)p,1,1);
+ vec_dststt(( unsigned short *)p,1,1);
+ vec_dstt(( unsigned short *)p,1,1);
+ vec_dst((_68 *)p,1,1);
+ vec_dstst((_68 *)p,1,1);
+ vec_dststt((_68 *)p,1,1);
+ vec_dstt((_68 *)p,1,1);
+ vec_st(u16,1,( unsigned short *)p);
+ vec_st(u16,1,(_68 *)p);
+ vec_ste(u16,1,( unsigned short *)p);
+ vec_ste(u16,1,(_68 *)p);
+ vec_stl(u16,1,( unsigned short *)p);
+ vec_stl(u16,1,(_68 *)p);
+ u8 = vec_lvsl(1,(const volatile short *)p);
+ u8 = vec_lvsl(1,(_69 *)p);
+ u8 = vec_lvsr(1,(const volatile short *)p);
+ u8 = vec_lvsr(1,(_69 *)p);
+ u8 = vec_lvsl(1,(const short *)p);
+ u8 = vec_lvsl(1,(_70 *)p);
+ u8 = vec_lvsr(1,(const short *)p);
+ u8 = vec_lvsr(1,(_70 *)p);
+ s16 = vec_ld(1,(const short *)p);
+ s16 = vec_ld(1,(_70 *)p);
+ s16 = vec_lde(1,(const short *)p);
+ s16 = vec_lde(1,(_70 *)p);
+ s16 = vec_ldl(1,(const short *)p);
+ s16 = vec_ldl(1,(_70 *)p);
+ vec_dst((const short *)p,1,1);
+ vec_dstst((const short *)p,1,1);
+ vec_dststt((const short *)p,1,1);
+ vec_dstt((const short *)p,1,1);
+ vec_dst((_70 *)p,1,1);
+ vec_dstst((_70 *)p,1,1);
+ vec_dststt((_70 *)p,1,1);
+ vec_dstt((_70 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile short *)p);
+ u8 = vec_lvsl(1,(_71 *)p);
+ u8 = vec_lvsr(1,( volatile short *)p);
+ u8 = vec_lvsr(1,(_71 *)p);
+ u8 = vec_lvsl(1,( short *)p);
+ u8 = vec_lvsl(1,(_72 *)p);
+ u8 = vec_lvsr(1,( short *)p);
+ u8 = vec_lvsr(1,(_72 *)p);
+ s16 = vec_ld(1,( short *)p);
+ s16 = vec_ld(1,(_72 *)p);
+ s16 = vec_lde(1,( short *)p);
+ s16 = vec_lde(1,(_72 *)p);
+ s16 = vec_ldl(1,( short *)p);
+ s16 = vec_ldl(1,(_72 *)p);
+ vec_dst(( short *)p,1,1);
+ vec_dstst(( short *)p,1,1);
+ vec_dststt(( short *)p,1,1);
+ vec_dstt(( short *)p,1,1);
+ vec_dst((_72 *)p,1,1);
+ vec_dstst((_72 *)p,1,1);
+ vec_dststt((_72 *)p,1,1);
+ vec_dstt((_72 *)p,1,1);
+ vec_st(s16,1,( short *)p);
+ vec_st(s16,1,(_72 *)p);
+ vec_ste(s16,1,( short *)p);
+ vec_ste(s16,1,(_72 *)p);
+ vec_stl(s16,1,( short *)p);
+ vec_stl(s16,1,(_72 *)p);
+ u8 = vec_lvsl(1,(const int volatile *)p);
+ u8 = vec_lvsl(1,(_73 *)p);
+ u8 = vec_lvsr(1,(const int volatile *)p);
+ u8 = vec_lvsr(1,(_73 *)p);
+ u8 = vec_lvsl(1,(const int *)p);
+ u8 = vec_lvsl(1,(_74 *)p);
+ u8 = vec_lvsr(1,(const int *)p);
+ u8 = vec_lvsr(1,(_74 *)p);
+ s32 = vec_ld(1,(const int *)p);
+ s32 = vec_ld(1,(_74 *)p);
+ s32 = vec_lde(1,(const int *)p);
+ s32 = vec_lde(1,(_74 *)p);
+ s32 = vec_ldl(1,(const int *)p);
+ s32 = vec_ldl(1,(_74 *)p);
+ vec_dst((const int *)p,1,1);
+ vec_dstst((const int *)p,1,1);
+ vec_dststt((const int *)p,1,1);
+ vec_dstt((const int *)p,1,1);
+ vec_dst((_74 *)p,1,1);
+ vec_dstst((_74 *)p,1,1);
+ vec_dststt((_74 *)p,1,1);
+ vec_dstt((_74 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile int *)p);
+ u8 = vec_lvsl(1,(_75 *)p);
+ u8 = vec_lvsr(1,( volatile int *)p);
+ u8 = vec_lvsr(1,(_75 *)p);
+ u8 = vec_lvsl(1,( int *)p);
+ u8 = vec_lvsl(1,(_76 *)p);
+ u8 = vec_lvsr(1,( int *)p);
+ u8 = vec_lvsr(1,(_76 *)p);
+ s32 = vec_ld(1,( int *)p);
+ s32 = vec_ld(1,(_76 *)p);
+ s32 = vec_lde(1,(int *)p);
+ s32 = vec_lde(1,(_76 *)p);
+ s32 = vec_ldl(1,(int *)p);
+ s32 = vec_ldl(1,(_76 *)p);
+ vec_dst((int *)p,1,1);
+ vec_dstst((int *)p,1,1);
+ vec_dststt((int *)p,1,1);
+ vec_dstt((int *)p,1,1);
+ vec_dst((_76 *)p,1,1);
+ vec_dstst((_76 *)p,1,1);
+ vec_dststt((_76 *)p,1,1);
+ vec_dstt((_76 *)p,1,1);
+ vec_st(s32,1,(int *)p);
+ vec_st(s32,1,(_76 *)p);
+ vec_ste(s32,1,(int *)p);
+ vec_ste(s32,1,(_76 *)p);
+ vec_stl(s32,1,(int *)p);
+ vec_stl(s32,1,(_76 *)p);
+ u16 = vec_ld(1,(const vector unsigned short *)p);
+ u16 = vec_ld(1,(_78 *)p);
+ u16 = vec_ldl(1,(const vector unsigned short *)p);
+ u16 = vec_ldl(1,(_78 *)p);
+ vec_dst((const vector unsigned short *)p,1,1);
+ vec_dstst((const vector unsigned short *)p,1,1);
+ vec_dststt((const vector unsigned short *)p,1,1);
+ vec_dstt((const vector unsigned short *)p,1,1);
+ vec_dst((_78 *)p,1,1);
+ vec_dstst((_78 *)p,1,1);
+ vec_dststt((_78 *)p,1,1);
+ vec_dstt((_78 *)p,1,1);
+ u16 = vec_ld(1,( vector unsigned short *)p);
+ u16 = vec_ld(1,(_80 *)p);
+ u16 = vec_ldl(1,( vector unsigned short *)p);
+ u16 = vec_ldl(1,(_80 *)p);
+ vec_dst(( vector unsigned short *)p,1,1);
+ vec_dstst(( vector unsigned short *)p,1,1);
+ vec_dststt(( vector unsigned short *)p,1,1);
+ vec_dstt(( vector unsigned short *)p,1,1);
+ vec_dst((_80 *)p,1,1);
+ vec_dstst((_80 *)p,1,1);
+ vec_dststt((_80 *)p,1,1);
+ vec_dstt((_80 *)p,1,1);
+ vec_st(u16,1,( vector unsigned short *)p);
+ vec_st(u16,1,(_80 *)p);
+ vec_stl(u16,1,( vector unsigned short *)p);
+ vec_stl(u16,1,(_80 *)p);
+ b8 = vec_ld(1,(const vector bool char *)p);
+ b8 = vec_ld(1,(_82 *)p);
+ b8 = vec_ldl(1,(const vector bool char *)p);
+ b8 = vec_ldl(1,(_82 *)p);
+ vec_dst((const vector bool char *)p,1,1);
+ vec_dstst((const vector bool char *)p,1,1);
+ vec_dststt((const vector bool char *)p,1,1);
+ vec_dstt((const vector bool char *)p,1,1);
+ vec_dst((_82 *)p,1,1);
+ vec_dstst((_82 *)p,1,1);
+ vec_dststt((_82 *)p,1,1);
+ vec_dstt((_82 *)p,1,1);
+ b8 = vec_ld(1,( vector bool char *)p);
+ b8 = vec_ld(1,(_84 *)p);
+ b8 = vec_ldl(1,( vector bool char *)p);
+ b8 = vec_ldl(1,(_84 *)p);
+ vec_dst(( vector bool char *)p,1,1);
+ vec_dstst(( vector bool char *)p,1,1);
+ vec_dststt(( vector bool char *)p,1,1);
+ vec_dstt(( vector bool char *)p,1,1);
+ vec_dst((_84 *)p,1,1);
+ vec_dstst((_84 *)p,1,1);
+ vec_dststt((_84 *)p,1,1);
+ vec_dstt((_84 *)p,1,1);
+ vec_st(b8,1,( vector bool char *)p);
+ vec_st(b8,1,(_84 *)p);
+ vec_stl(b8,1,( vector bool char *)p);
+ vec_stl(b8,1,(_84 *)p);
+ u8 = vec_lvsl(1,(const volatile int signed *)p);
+ u8 = vec_lvsl(1,(_85 *)p);
+ u8 = vec_lvsr(1,(const volatile int signed *)p);
+ u8 = vec_lvsr(1,(_85 *)p);
+ u8 = vec_lvsl(1,(const int signed *)p);
+ u8 = vec_lvsl(1,(_86 *)p);
+ u8 = vec_lvsr(1,(const int signed *)p);
+ u8 = vec_lvsr(1,(_86 *)p);
+ s32 = vec_ld(1,(const int signed *)p);
+ s32 = vec_ld(1,(_86 *)p);
+ s32 = vec_lde(1,(const int signed *)p);
+ s32 = vec_lde(1,(_86 *)p);
+ s32 = vec_ldl(1,(const int signed *)p);
+ s32 = vec_ldl(1,(_86 *)p);
+ vec_dst((const int signed *)p,1,1);
+ vec_dstst((const int signed *)p,1,1);
+ vec_dststt((const int signed *)p,1,1);
+ vec_dstt((const int signed *)p,1,1);
+ vec_dst((_86 *)p,1,1);
+ vec_dstst((_86 *)p,1,1);
+ vec_dststt((_86 *)p,1,1);
+ vec_dstt((_86 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile int signed *)p);
+ u8 = vec_lvsl(1,(_87 *)p);
+ u8 = vec_lvsr(1,( volatile int signed *)p);
+ u8 = vec_lvsr(1,(_87 *)p);
+ u8 = vec_lvsl(1,(int signed *)p);
+ u8 = vec_lvsl(1,(_88 *)p);
+ u8 = vec_lvsr(1,(int signed *)p);
+ u8 = vec_lvsr(1,(_88 *)p);
+ s32 = vec_ld(1,(int signed *)p);
+ s32 = vec_ld(1,(_88 *)p);
+ s32 = vec_lde(1,(int signed *)p);
+ s32 = vec_lde(1,(_88 *)p);
+ s32 = vec_ldl(1,(int signed *)p);
+ s32 = vec_ldl(1,(_88 *)p);
+ vec_dst((int signed *)p,1,1);
+ vec_dstst((int signed *)p,1,1);
+ vec_dststt((int signed *)p,1,1);
+ vec_dstt((int signed *)p,1,1);
+ vec_dst((_88 *)p,1,1);
+ vec_dstst((_88 *)p,1,1);
+ vec_dststt((_88 *)p,1,1);
+ vec_dstt((_88 *)p,1,1);
+ vec_st(s32,1,(int signed *)p);
+ vec_st(s32,1,(_88 *)p);
+ vec_ste(s32,1,(int signed *)p);
+ vec_ste(s32,1,(_88 *)p);
+ vec_stl(s32,1,(int signed *)p);
+ vec_stl(s32,1,(_88 *)p);
+ s32 = vec_ld(1,(const vector signed int *)p);
+ s32 = vec_ld(1,(_90 *)p);
+ s32 = vec_ldl(1,(const vector signed int *)p);
+ s32 = vec_ldl(1,(_90 *)p);
+ vec_dst((const vector signed int *)p,1,1);
+ vec_dstst((const vector signed int *)p,1,1);
+ vec_dststt((const vector signed int *)p,1,1);
+ vec_dstt((const vector signed int *)p,1,1);
+ vec_dst((_90 *)p,1,1);
+ vec_dstst((_90 *)p,1,1);
+ vec_dststt((_90 *)p,1,1);
+ vec_dstt((_90 *)p,1,1);
+ s32 = vec_ld(1,( vector signed int *)p);
+ s32 = vec_ld(1,(_92 *)p);
+ s32 = vec_ldl(1,( vector signed int *)p);
+ s32 = vec_ldl(1,(_92 *)p);
+ vec_dst(( vector signed int *)p,1,1);
+ vec_dstst(( vector signed int *)p,1,1);
+ vec_dststt(( vector signed int *)p,1,1);
+ vec_dstt(( vector signed int *)p,1,1);
+ vec_dst((_92 *)p,1,1);
+ vec_dstst((_92 *)p,1,1);
+ vec_dststt((_92 *)p,1,1);
+ vec_dstt((_92 *)p,1,1);
+ vec_st(s32,1,( vector signed int *)p);
+ vec_st(s32,1,(_92 *)p);
+ vec_stl(s32,1,( vector signed int *)p);
+ vec_stl(s32,1,(_92 *)p);
+ u32 = vec_ld(1,(const vector unsigned int *)p);
+ u32 = vec_ld(1,(_94 *)p);
+ u32 = vec_ldl(1,(const vector unsigned int *)p);
+ u32 = vec_ldl(1,(_94 *)p);
+ vec_dst((const vector unsigned int *)p,1,1);
+ vec_dstst((const vector unsigned int *)p,1,1);
+ vec_dststt((const vector unsigned int *)p,1,1);
+ vec_dstt((const vector unsigned int *)p,1,1);
+ vec_dst((_94 *)p,1,1);
+ vec_dstst((_94 *)p,1,1);
+ vec_dststt((_94 *)p,1,1);
+ vec_dstt((_94 *)p,1,1);
+ u32 = vec_ld(1,( vector unsigned int *)p);
+ u32 = vec_ld(1,(_96 *)p);
+ u32 = vec_ldl(1,( vector unsigned int *)p);
+ u32 = vec_ldl(1,(_96 *)p);
+ vec_dst(( vector unsigned int *)p,1,1);
+ vec_dstst(( vector unsigned int *)p,1,1);
+ vec_dststt(( vector unsigned int *)p,1,1);
+ vec_dstt(( vector unsigned int *)p,1,1);
+ vec_dst((_96 *)p,1,1);
+ vec_dstst((_96 *)p,1,1);
+ vec_dststt((_96 *)p,1,1);
+ vec_dstt((_96 *)p,1,1);
+ vec_st(u32,1,( vector unsigned int *)p);
+ vec_st(u32,1,(_96 *)p);
+ vec_stl(u32,1,( vector unsigned int *)p);
+ vec_stl(u32,1,(_96 *)p);
+ u8 = vec_lvsl(1,(const volatile int signed *)p);
+ u8 = vec_lvsl(1,(_97 *)p);
+ u8 = vec_lvsr(1,(const volatile int signed *)p);
+ u8 = vec_lvsr(1,(_97 *)p);
+ u8 = vec_lvsl(1,(const int signed *)p);
+ u8 = vec_lvsl(1,(_98 *)p);
+ u8 = vec_lvsr(1,(const int signed *)p);
+ u8 = vec_lvsr(1,(_98 *)p);
+ s32 = vec_ld(1,(const int signed *)p);
+ s32 = vec_ld(1,(_98 *)p);
+ s32 = vec_lde(1,(const int signed *)p);
+ s32 = vec_lde(1,(_98 *)p);
+ s32 = vec_ldl(1,(const int signed *)p);
+ s32 = vec_ldl(1,(_98 *)p);
+ vec_dst((const int signed *)p,1,1);
+ vec_dstst((const int signed *)p,1,1);
+ vec_dststt((const int signed *)p,1,1);
+ vec_dstt((const int signed *)p,1,1);
+ vec_dst((_98 *)p,1,1);
+ vec_dstst((_98 *)p,1,1);
+ vec_dststt((_98 *)p,1,1);
+ vec_dstt((_98 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile int signed *)p);
+ u8 = vec_lvsl(1,(_99 *)p);
+ u8 = vec_lvsr(1,( volatile int signed *)p);
+ u8 = vec_lvsr(1,(_99 *)p);
+ u8 = vec_lvsl(1,(int signed *)p);
+ u8 = vec_lvsl(1,(_100 *)p);
+ u8 = vec_lvsr(1,(int signed *)p);
+ u8 = vec_lvsr(1,(_100 *)p);
+ s32 = vec_ld(1,(int signed *)p);
+ s32 = vec_ld(1,(_100 *)p);
+ s32 = vec_lde(1,(int signed *)p);
+ s32 = vec_lde(1,(_100 *)p);
+ s32 = vec_ldl(1,(int signed *)p);
+ s32 = vec_ldl(1,(_100 *)p);
+ vec_dst((int signed *)p,1,1);
+ vec_dstst((int signed *)p,1,1);
+ vec_dststt((int signed *)p,1,1);
+ vec_dstt((int signed *)p,1,1);
+ vec_dst((_100 *)p,1,1);
+ vec_dstst((_100 *)p,1,1);
+ vec_dststt((_100 *)p,1,1);
+ vec_dstt((_100 *)p,1,1);
+ vec_st(s32,1,(int signed *)p);
+ vec_st(s32,1,(_100 *)p);
+ vec_ste(s32,1,(int signed *)p);
+ vec_ste(s32,1,(_100 *)p);
+ vec_stl(s32,1,(int signed *)p);
+ vec_stl(s32,1,(_100 *)p);
+ u8 = vec_lvsl(1,(const volatile short int *)p);
+ u8 = vec_lvsl(1,(_101 *)p);
+ u8 = vec_lvsr(1,(const volatile short int *)p);
+ u8 = vec_lvsr(1,(_101 *)p);
+ u8 = vec_lvsl(1,(const short int *)p);
+ u8 = vec_lvsl(1,(_102 *)p);
+ u8 = vec_lvsr(1,(const short int *)p);
+ u8 = vec_lvsr(1,(_102 *)p);
+ s16 = vec_ld(1,(const short int *)p);
+ s16 = vec_ld(1,(_102 *)p);
+ s16 = vec_lde(1,(const short int *)p);
+ s16 = vec_lde(1,(_102 *)p);
+ s16 = vec_ldl(1,(const short int *)p);
+ s16 = vec_ldl(1,(_102 *)p);
+ vec_dst((const short int *)p,1,1);
+ vec_dstst((const short int *)p,1,1);
+ vec_dststt((const short int *)p,1,1);
+ vec_dstt((const short int *)p,1,1);
+ vec_dst((_102 *)p,1,1);
+ vec_dstst((_102 *)p,1,1);
+ vec_dststt((_102 *)p,1,1);
+ vec_dstt((_102 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile short int *)p);
+ u8 = vec_lvsl(1,(_103 *)p);
+ u8 = vec_lvsr(1,( volatile short int *)p);
+ u8 = vec_lvsr(1,(_103 *)p);
+ u8 = vec_lvsl(1,( short int *)p);
+ u8 = vec_lvsl(1,(_104 *)p);
+ u8 = vec_lvsr(1,( short int *)p);
+ u8 = vec_lvsr(1,(_104 *)p);
+ s16 = vec_ld(1,( short int *)p);
+ s16 = vec_ld(1,(_104 *)p);
+ s16 = vec_lde(1,( short int *)p);
+ s16 = vec_lde(1,(_104 *)p);
+ s16 = vec_ldl(1,( short int *)p);
+ s16 = vec_ldl(1,(_104 *)p);
+ vec_dst(( short int *)p,1,1);
+ vec_dstst(( short int *)p,1,1);
+ vec_dststt(( short int *)p,1,1);
+ vec_dstt(( short int *)p,1,1);
+ vec_dst((_104 *)p,1,1);
+ vec_dstst((_104 *)p,1,1);
+ vec_dststt((_104 *)p,1,1);
+ vec_dstt((_104 *)p,1,1);
+ vec_st(s16,1,( short int *)p);
+ vec_st(s16,1,(_104 *)p);
+ vec_ste(s16,1,( short int *)p);
+ vec_ste(s16,1,(_104 *)p);
+ vec_stl(s16,1,( short int *)p);
+ vec_stl(s16,1,(_104 *)p);
+ u8 = vec_lvsl(1,(const volatile int *)p);
+ u8 = vec_lvsl(1,(_105 *)p);
+ u8 = vec_lvsr(1,(const volatile int *)p);
+ u8 = vec_lvsr(1,(_105 *)p);
+ u8 = vec_lvsl(1,(const int *)p);
+ u8 = vec_lvsl(1,(_106 *)p);
+ u8 = vec_lvsr(1,(const int *)p);
+ u8 = vec_lvsr(1,(_106 *)p);
+ s32 = vec_ld(1,(const int *)p);
+ s32 = vec_ld(1,(_106 *)p);
+ s32 = vec_lde(1,(const int *)p);
+ s32 = vec_lde(1,(_106 *)p);
+ s32 = vec_ldl(1,(const int *)p);
+ s32 = vec_ldl(1,(_106 *)p);
+ vec_dst((const int *)p,1,1);
+ vec_dstst((const int *)p,1,1);
+ vec_dststt((const int *)p,1,1);
+ vec_dstt((const int *)p,1,1);
+ vec_dst((_106 *)p,1,1);
+ vec_dstst((_106 *)p,1,1);
+ vec_dststt((_106 *)p,1,1);
+ vec_dstt((_106 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile int *)p);
+ u8 = vec_lvsl(1,(_107 *)p);
+ u8 = vec_lvsr(1,( volatile int *)p);
+ u8 = vec_lvsr(1,(_107 *)p);
+ u8 = vec_lvsl(1,( int *)p);
+ u8 = vec_lvsl(1,(_108 *)p);
+ u8 = vec_lvsr(1,( int *)p);
+ u8 = vec_lvsr(1,(_108 *)p);
+ s32 = vec_ld(1,( int *)p);
+ s32 = vec_ld(1,(_108 *)p);
+ s32 = vec_lde(1,( int *)p);
+ s32 = vec_lde(1,(_108 *)p);
+ s32 = vec_ldl(1,( int *)p);
+ s32 = vec_ldl(1,(_108 *)p);
+ vec_dst(( int *)p,1,1);
+ vec_dstst(( int *)p,1,1);
+ vec_dststt(( int *)p,1,1);
+ vec_dstt(( int *)p,1,1);
+ vec_dst((_108 *)p,1,1);
+ vec_dstst((_108 *)p,1,1);
+ vec_dststt((_108 *)p,1,1);
+ vec_dstt((_108 *)p,1,1);
+ vec_st(s32,1,( int *)p);
+ vec_st(s32,1,(_108 *)p);
+ vec_ste(s32,1,( int *)p);
+ vec_ste(s32,1,(_108 *)p);
+ vec_stl(s32,1,( int *)p);
+ vec_stl(s32,1,(_108 *)p);
+ u8 = vec_lvsl(1,(const volatile int *)p);
+ u8 = vec_lvsl(1,(_109 *)p);
+ u8 = vec_lvsr(1,(const volatile int *)p);
+ u8 = vec_lvsr(1,(_109 *)p);
+ u8 = vec_lvsl(1,(const int *)p);
+ u8 = vec_lvsl(1,(_110 *)p);
+ u8 = vec_lvsr(1,(const int *)p);
+ u8 = vec_lvsr(1,(_110 *)p);
+ s32 = vec_ld(1,(const int *)p);
+ s32 = vec_ld(1,(_110 *)p);
+ s32 = vec_lde(1,(const int *)p);
+ s32 = vec_lde(1,(_110 *)p);
+ s32 = vec_ldl(1,(const int *)p);
+ s32 = vec_ldl(1,(_110 *)p);
+ vec_dst((const int *)p,1,1);
+ vec_dstst((const int *)p,1,1);
+ vec_dststt((const int *)p,1,1);
+ vec_dstt((const int *)p,1,1);
+ vec_dst((_110 *)p,1,1);
+ vec_dstst((_110 *)p,1,1);
+ vec_dststt((_110 *)p,1,1);
+ vec_dstt((_110 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile int *)p);
+ u8 = vec_lvsl(1,(_111 *)p);
+ u8 = vec_lvsr(1,( volatile int *)p);
+ u8 = vec_lvsr(1,(_111 *)p);
+ u8 = vec_lvsl(1,( int *)p);
+ u8 = vec_lvsl(1,(_112 *)p);
+ u8 = vec_lvsr(1,( int *)p);
+ u8 = vec_lvsr(1,(_112 *)p);
+ s32 = vec_ld(1,( int *)p);
+ s32 = vec_ld(1,(_112 *)p);
+ s32 = vec_lde(1,( int *)p);
+ s32 = vec_lde(1,(_112 *)p);
+ s32 = vec_ldl(1,( int *)p);
+ s32 = vec_ldl(1,(_112 *)p);
+ vec_dst(( int *)p,1,1);
+ vec_dstst(( int *)p,1,1);
+ vec_dststt(( int *)p,1,1);
+ vec_dstt(( int *)p,1,1);
+ vec_dst((_112 *)p,1,1);
+ vec_dstst((_112 *)p,1,1);
+ vec_dststt((_112 *)p,1,1);
+ vec_dstt((_112 *)p,1,1);
+ vec_st(s32,1,( int *)p);
+ vec_st(s32,1,(_112 *)p);
+ vec_ste(s32,1,( int *)p);
+ vec_ste(s32,1,(_112 *)p);
+ vec_stl(s32,1,( int *)p);
+ vec_stl(s32,1,(_112 *)p);
+ u8 = vec_ld(1,(const vector unsigned char *)p);
+ u8 = vec_ld(1,(_114 *)p);
+ u8 = vec_ldl(1,(const vector unsigned char *)p);
+ u8 = vec_ldl(1,(_114 *)p);
+ vec_dst((const vector unsigned char *)p,1,1);
+ vec_dstst((const vector unsigned char *)p,1,1);
+ vec_dststt((const vector unsigned char *)p,1,1);
+ vec_dstt((const vector unsigned char *)p,1,1);
+ vec_dst((_114 *)p,1,1);
+ vec_dstst((_114 *)p,1,1);
+ vec_dststt((_114 *)p,1,1);
+ vec_dstt((_114 *)p,1,1);
+ u8 = vec_ld(1,( vector unsigned char *)p);
+ u8 = vec_ld(1,(_116 *)p);
+ u8 = vec_ldl(1,( vector unsigned char *)p);
+ u8 = vec_ldl(1,(_116 *)p);
+ vec_dst(( vector unsigned char *)p,1,1);
+ vec_dstst(( vector unsigned char *)p,1,1);
+ vec_dststt(( vector unsigned char *)p,1,1);
+ vec_dstt(( vector unsigned char *)p,1,1);
+ vec_dst((_116 *)p,1,1);
+ vec_dstst((_116 *)p,1,1);
+ vec_dststt((_116 *)p,1,1);
+ vec_dstt((_116 *)p,1,1);
+ vec_st(u8,1,( vector unsigned char *)p);
+ vec_st(u8,1,(_116 *)p);
+ vec_stl(u8,1,( vector unsigned char *)p);
+ vec_stl(u8,1,(_116 *)p);
+ u8 = vec_lvsl(1,(const volatile signed char *)p);
+ u8 = vec_lvsl(1,(_117 *)p);
+ u8 = vec_lvsr(1,(const volatile signed char *)p);
+ u8 = vec_lvsr(1,(_117 *)p);
+ u8 = vec_lvsl(1,(const signed char *)p);
+ u8 = vec_lvsl(1,(_118 *)p);
+ u8 = vec_lvsr(1,(const signed char *)p);
+ u8 = vec_lvsr(1,(_118 *)p);
+ s8 = vec_ld(1,(const signed char *)p);
+ s8 = vec_ld(1,(_118 *)p);
+ s8 = vec_lde(1,(const signed char *)p);
+ s8 = vec_lde(1,(_118 *)p);
+ s8 = vec_ldl(1,(const signed char *)p);
+ s8 = vec_ldl(1,(_118 *)p);
+ vec_dst((const signed char *)p,1,1);
+ vec_dstst((const signed char *)p,1,1);
+ vec_dststt((const signed char *)p,1,1);
+ vec_dstt((const signed char *)p,1,1);
+ vec_dst((_118 *)p,1,1);
+ vec_dstst((_118 *)p,1,1);
+ vec_dststt((_118 *)p,1,1);
+ vec_dstt((_118 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile signed char *)p);
+ u8 = vec_lvsl(1,(_119 *)p);
+ u8 = vec_lvsr(1,( volatile signed char *)p);
+ u8 = vec_lvsr(1,(_119 *)p);
+ u8 = vec_lvsl(1,( signed char *)p);
+ u8 = vec_lvsl(1,(_120 *)p);
+ u8 = vec_lvsr(1,( signed char *)p);
+ u8 = vec_lvsr(1,(_120 *)p);
+ s8 = vec_ld(1,( signed char *)p);
+ s8 = vec_ld(1,(_120 *)p);
+ s8 = vec_lde(1,( signed char *)p);
+ s8 = vec_lde(1,(_120 *)p);
+ s8 = vec_ldl(1,( signed char *)p);
+ s8 = vec_ldl(1,(_120 *)p);
+ vec_dst(( signed char *)p,1,1);
+ vec_dstst(( signed char *)p,1,1);
+ vec_dststt(( signed char *)p,1,1);
+ vec_dstt(( signed char *)p,1,1);
+ vec_dst((_120 *)p,1,1);
+ vec_dstst((_120 *)p,1,1);
+ vec_dststt((_120 *)p,1,1);
+ vec_dstt((_120 *)p,1,1);
+ vec_st(s8,1,( signed char *)p);
+ vec_st(s8,1,(_120 *)p);
+ vec_ste(s8,1,( signed char *)p);
+ vec_ste(s8,1,(_120 *)p);
+ vec_stl(s8,1,( signed char *)p);
+ vec_stl(s8,1,(_120 *)p);
+ u8 = vec_lvsl(1,(const volatile float *)p);
+ u8 = vec_lvsl(1,(_121 *)p);
+ u8 = vec_lvsr(1,(const volatile float *)p);
+ u8 = vec_lvsr(1,(_121 *)p);
+ u8 = vec_lvsl(1,(const float *)p);
+ u8 = vec_lvsl(1,(_122 *)p);
+ u8 = vec_lvsr(1,(const float *)p);
+ u8 = vec_lvsr(1,(_122 *)p);
+ f32 = vec_ld(1,(const float *)p);
+ f32 = vec_ld(1,(_122 *)p);
+ f32 = vec_lde(1,(const float *)p);
+ f32 = vec_lde(1,(_122 *)p);
+ f32 = vec_ldl(1,(const float *)p);
+ f32 = vec_ldl(1,(_122 *)p);
+ vec_dst((const float *)p,1,1);
+ vec_dstst((const float *)p,1,1);
+ vec_dststt((const float *)p,1,1);
+ vec_dstt((const float *)p,1,1);
+ vec_dst((_122 *)p,1,1);
+ vec_dstst((_122 *)p,1,1);
+ vec_dststt((_122 *)p,1,1);
+ vec_dstt((_122 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile float *)p);
+ u8 = vec_lvsl(1,(_123 *)p);
+ u8 = vec_lvsr(1,( volatile float *)p);
+ u8 = vec_lvsr(1,(_123 *)p);
+ u8 = vec_lvsl(1,( float *)p);
+ u8 = vec_lvsl(1,(_124 *)p);
+ u8 = vec_lvsr(1,( float *)p);
+ u8 = vec_lvsr(1,(_124 *)p);
+ f32 = vec_ld(1,( float *)p);
+ f32 = vec_ld(1,(_124 *)p);
+ f32 = vec_lde(1,( float *)p);
+ f32 = vec_lde(1,(_124 *)p);
+ f32 = vec_ldl(1,( float *)p);
+ f32 = vec_ldl(1,(_124 *)p);
+ vec_dst(( float *)p,1,1);
+ vec_dstst(( float *)p,1,1);
+ vec_dststt(( float *)p,1,1);
+ vec_dstt(( float *)p,1,1);
+ vec_dst((_124 *)p,1,1);
+ vec_dstst((_124 *)p,1,1);
+ vec_dststt((_124 *)p,1,1);
+ vec_dstt((_124 *)p,1,1);
+ vec_st(f32,1,( float *)p);
+ vec_st(f32,1,(_124 *)p);
+ vec_ste(f32,1,( float *)p);
+ vec_ste(f32,1,(_124 *)p);
+ vec_stl(f32,1,( float *)p);
+ vec_stl(f32,1,(_124 *)p);
+}