/* { dg-do run } */
/* { dg-require-effective-target fma4 } */
/* { dg-options "-O2 -mfma4" } */
#include "fma4-check.h"
#include <x86intrin.h>
#include <string.h>
#define NUM 20
union
{
  __m256 x[NUM];
  float f[NUM * 8];
  __m256d y[NUM];
  double d[NUM * 4];
} dst, res, src1, src2, src3;
/* Note that in the macc*, msub*, nmacc* and nmsub* instructions, the
   intermediate product is not rounded; only the addition is rounded.  */
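/* With the small integer inputs set up below, both the intermediate
   product and the final sum are exactly representable in float and in
   double, so the reference values computed in check_nmaccps and
   check_nmaccpd with a separately rounded multiply match the fused
   results exactly, and the bitwise equality checks are safe.  */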
static void
init_nmaccps ()
{
  int i;

  for (i = 0; i < NUM * 8; i++)
    {
      src1.f[i] = i;
      src2.f[i] = i + 10;
      src3.f[i] = i + 20;
    }
}
static void
init_nmaccpd ()
{
  int i;

  for (i = 0; i < NUM * 4; i++)
    {
      src1.d[i] = i;
      src2.d[i] = i + 10;
      src3.d[i] = i + 20;
    }
}
static int
check_nmaccps ()
{
  int i, j, check_fails = 0;

  for (i = 0; i < NUM * 8; i = i + 8)
    for (j = 0; j < 8; j++)
      {
	res.f[i + j] = - (src1.f[i + j] * src2.f[i + j]) + src3.f[i + j];
	if (dst.f[i + j] != res.f[i + j])
	  check_fails++;
      }
  return check_fails;
}
static int
check_nmaccpd ()
{
  int i, j, check_fails = 0;

  for (i = 0; i < NUM * 4; i = i + 4)
    for (j = 0; j < 4; j++)
      {
	res.d[i + j] = - (src1.d[i + j] * src2.d[i + j]) + src3.d[i + j];
	if (dst.d[i + j] != res.d[i + j])
	  check_fails++;
      }
  return check_fails;
}
static void
fma4_test (void)
{
  int i;

  init_nmaccps ();

  for (i = 0; i < NUM; i++)
    dst.x[i] = _mm256_nmacc_ps (src1.x[i], src2.x[i], src3.x[i]);

  if (check_nmaccps ())
    abort ();

  init_nmaccpd ();

  for (i = 0; i < NUM; i++)
    dst.y[i] = _mm256_nmacc_pd (src1.y[i], src2.y[i], src3.y[i]);

  if (check_nmaccpd ())
    abort ();
}