path: root/gcc/testsuite/gcc.dg/tree-ssa/asmgoto-1.c
/* { dg-do compile } */
/* { dg-options "-O2 -w -fdump-tree-optimized" } */
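
/* Exercise asm goto at the tree level.  The asm goto in
   trace_kmalloc_node below is inlined into the loop in process_zones,
   and the optimizers must preserve its edge to "trace_label".  The
   declarations appear to be reduced from Linux kernel tracepoint and
   slab-allocator code.  */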

extern void XYZZY (void);
typedef unsigned long __kernel_size_t;
typedef __kernel_size_t size_t;
typedef unsigned gfp_t;
struct per_cpu_pageset { } __attribute__ ((__aligned__ ((1 << (6)))));
struct zone { struct per_cpu_pageset *pageset[64]; } zone_flags_t;
typedef struct pglist_data { struct zone node_zones[4]; } pg_data_t;
extern struct pglist_data *first_online_pgdat (void);
extern struct zone *next_zone (struct zone *zone);
extern volatile int per_cpu__x86_cpu_to_node_map[];
struct kmem_cache { int size; };
extern struct kmem_cache kmalloc_caches[(12 + 2)];
struct tracepoint { void **funcs; } __attribute__ ((aligned (32)));
extern struct tracepoint __tracepoint_kmalloc_node;
void *__kmalloc_node (size_t size, gfp_t flags, int node);

static inline int
cpu_to_node (int cpu)
{
  return per_cpu__x86_cpu_to_node_map[cpu];
}

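/* Kernel-style static tracepoint: the empty asm goto normally falls
   through, but may transfer control to trace_label, whose block is
   otherwise unreachable behind "if (0)".  */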
static inline void
trace_kmalloc_node (unsigned long call_site, const void *ptr,
		    size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags,
		    int node)
{
  asm goto ("" : : : : trace_label);
  if (0)
    {
      void **it_func;
    trace_label:
      asm ("" : "=r" (it_func) : "0" (&__tracepoint_kmalloc_node.funcs));
    }
}

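/* Always-inline stand-ins for the kernel's kmalloc size-to-cache
   lookup; with a constant size both collapse at compile time.  */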
static inline __attribute__ ((always_inline)) int
kmalloc_index (size_t size)
{
  if (size <= 64)
    return 6;
  return -1;
}

static inline __attribute__ ((always_inline)) struct kmem_cache *
kmalloc_slab (size_t size)
{
  int index = kmalloc_index (size);
  if (index == 0)
    return ((void *) 0);
  return &kmalloc_caches[index];
}

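/* Constant-size fast path: always_inline forces the whole chain,
   including the asm goto in trace_kmalloc_node, into each caller.  */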
static inline __attribute__ ((always_inline)) void *
kmalloc_node (size_t size, gfp_t flags, int node)
{
  void *ret;
  if (__builtin_constant_p (size) && size <= (2 * ((1UL) << 12))
      && !(flags & ((gfp_t) 0x01u)))
    {
      struct kmem_cache *s = kmalloc_slab (size);
      if (!s)
	return ((void *) 16);
      trace_kmalloc_node (({ __here:(unsigned long) &&__here;}),
			  ret, size, s->size, flags, node);
    }
  return __kmalloc_node (size, flags, node);
}

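/* Allocate a per-CPU pageset for every zone; any failure jumps to the
   shared error path at "bad".  */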
int
process_zones (int cpu)
{
  struct zone *zone, *dzone;
  int node = cpu_to_node (cpu);
  for (zone = (first_online_pgdat ())->node_zones;
       zone; zone = next_zone (zone))
    {
      zone->pageset[cpu]
	= kmalloc_node (sizeof (struct per_cpu_pageset),
			((gfp_t) 0x10u | (gfp_t) 0x40u | (gfp_t) 0x80u),
			node);
      if (!zone->pageset[cpu])
	goto bad;
    }
  return 0;
bad:
  XYZZY ();
  return -12;
}

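/* The optimized dump must contain exactly one call to XYZZY: the asm
   goto edges must neither duplicate nor eliminate the "bad" block.  */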
/* { dg-final { scan-tree-dump-times "XYZZY" 1 "optimized" } } */
/* { dg-final { cleanup-tree-dump "optimized" } } */