/* { dg-do compile } */
/* { dg-options "-O2 -fno-early-inlining" } */
extern void do_something_usefull();
/* Check that we finish compiling even if instructed to
   flatten a cyclic callgraph.  Verify that we flatten
   correctly when another function marked flatten is in
   the callgraph.
   The main inliner is careful about indirect calls, giving
   precedence to breaking cycles at indirect call sites.
   The early inliner can't do a similar analysis, so we need
   to disable it if we want cycles to be broken consistently.  */
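/* Trivial case: a flattened function that recurses directly into itself.  */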
void __attribute__((flatten)) direct(void)
{
  direct();
}
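/* Two-node cycle reached from a flattened function: indirect calls the
   static helper indirect1, which calls back into indirect.  */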
void __attribute__((flatten)) indirect(void);
static void indirect1(void)
{
  indirect();
}
void __attribute__((flatten)) indirect(void)
{
  indirect1();
}
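/* Three-node cycle: doubleindirect -> doubleindirect1 -> doubleindirect2
   -> doubleindirect.  */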
void __attribute__((flatten)) doubleindirect(void);
static void doubleindirect2(void)
{
  doubleindirect();
  do_something_usefull ();
}
static void doubleindirect1(void)
{
  doubleindirect2();
}
void __attribute__((flatten)) doubleindirect(void)
{
  doubleindirect1();
}
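/* Cycle that does not contain the flattened function itself: subcycle
   only enters a cycle formed by the static helpers subcycle1 and
   subcycle2.  */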
static void subcycle1(void);
static void subcycle2(void)
{
  subcycle1();
  do_something_usefull ();
}
static void subcycle1(void)
{
  subcycle2();
}
void __attribute__((flatten)) subcycle(void)
{
  subcycle1();
}
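/* Same as above, but the cycle consists of three static helpers.  */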
static void doublesubcycle1(void);
static void doublesubcycle2(void);
static void doublesubcycle3(void)
{
  doublesubcycle1();
  do_something_usefull ();
}
static void doublesubcycle2(void)
{
  doublesubcycle3();
}
static void doublesubcycle1(void)
{
  doublesubcycle2();
}
void __attribute__((flatten)) doublesubcycle(void)
{
  doublesubcycle1();
}
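/* At least one member of the sub-cycles must survive as an out-of-line
   function, since those cycles cannot be flattened completely, while the
   static helpers of the indirect cycles must all be inlined away.  */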
/* { dg-final { scan-assembler "cycle\[123\]\[: \t\n\]" } } */
/* { dg-final { scan-assembler-not "indirect\[12\]\[: \t\n\]" } } */