// locks.h - Thread synchronization primitives. ARM implementation.

/* Copyright (C) 2007  Free Software Foundation

   This file is part of libgcj.

   This software is copyrighted work licensed under the terms of the
   Libgcj License.  Please consult the file "LIBGCJ_LICENSE" for
   details.  */

#ifndef __SYSDEP_LOCKS_H__
#define __SYSDEP_LOCKS_H__

typedef size_t obj_addr_t;  /* Integer type big enough for an object
                               address.  */

#if (__ARM_EABI__ && __linux)

// Atomically replace *addr by new_val if it was initially equal to old.
// Return true if the comparison succeeded.
// Assumed to have acquire semantics, i.e. later memory operations
// cannot execute before the compare_and_swap finishes.
inline static bool
compare_and_swap(volatile obj_addr_t *addr,
                 obj_addr_t old,
                 obj_addr_t new_val)
{
  return __sync_bool_compare_and_swap(addr, old, new_val);
}
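
// Illustrative sketch (not part of the original libgcj header): a
// minimal spin-lock acquire built on the acquire-semantics
// compare_and_swap above.  The lock-word encoding (0 = free,
// 1 = held) and the function name are assumptions made for this
// example only.
inline static void
example_spin_lock(volatile obj_addr_t *lock)
{
  // Spin until we observe 0 and atomically replace it with 1.  The
  // acquire semantics keep the critical section's memory operations
  // from being reordered ahead of the lock acquisition.
  while (!compare_and_swap(lock, 0, 1))
    ;
}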

// Set *addr to new_val with release semantics, i.e. making sure
// that prior loads and stores complete before this assignment.
inline static void
release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
{
  __sync_synchronize();
  *(addr) = new_val;
}
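
// Illustrative sketch (not part of the original libgcj header): the
// matching unlock for example_spin_lock above.  release_set's barrier
// forces the critical section's stores to complete before the lock
// word is cleared, so the next acquirer observes them.
inline static void
example_spin_unlock(volatile obj_addr_t *lock)
{
  release_set(lock, 0);
}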

// Compare_and_swap with release semantics instead of acquire semantics.
// On many architectures, the operation makes both guarantees, so the
// implementation can be the same.
inline static bool
compare_and_swap_release(volatile obj_addr_t *addr,
                         obj_addr_t old,
                         obj_addr_t new_val)
{
  return __sync_bool_compare_and_swap(addr, old, new_val);
}

// Ensure that subsequent instructions do not execute on stale
// data that was loaded from memory before the barrier.
inline static void
read_barrier()
{
  __sync_synchronize();
}

// Ensure that prior stores to memory are completed with respect to other
// processors.
inline static void
write_barrier()
{
  __sync_synchronize();
}
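
// Illustrative sketch (not part of the original libgcj header): the
// classic publish/consume pairing of write_barrier and read_barrier.
// The example_data/example_ready variables and function names are
// hypothetical, introduced only for this example.
static obj_addr_t example_data;
static volatile obj_addr_t example_ready;

inline static void
example_publish(obj_addr_t value)
{
  example_data = value;
  write_barrier();   // data must be globally visible before the flag
  example_ready = 1;
}

inline static bool
example_consume(obj_addr_t *out)
{
  if (!example_ready)
    return false;
  read_barrier();    // don't read data staler than the flag we saw
  *out = example_data;
  return true;
}
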
#else

/* Atomic compare and exchange.  These sequences are not actually
   atomic; there is a race if *ADDR != OLD_VAL and we are preempted
   between the two swaps.  However, they are very close to atomic, and
   are the best that a pre-ARMv6 implementation can do without
   operating system support.  LinuxThreads has been using these
   sequences for many years.  */
inline static bool
compare_and_swap(volatile obj_addr_t *addr,
                 obj_addr_t old_val,
                 obj_addr_t new_val)
{
  volatile obj_addr_t result, tmp;
  __asm__ ("\n"
           // Load the current value; fail immediately on a mismatch.
           "0:     ldr     %[tmp],[%[addr]]\n"
           "       cmp     %[tmp],%[old_val]\n"
           "       movne   %[result],#0\n"
           "       bne     1f\n"
           // Atomically swap in new_val.  If *addr changed between the
           // ldr and the swp, swap the observed value back and retry
           // from the top; otherwise report success.
           "       swp     %[result],%[new_val],[%[addr]]\n"
           "       cmp     %[tmp],%[result]\n"
           "       swpne   %[tmp],%[result],[%[addr]]\n"
           "       bne     0b\n"
           "       mov     %[result],#1\n"
           "1:"
           : [result] "=&r" (result), [tmp] "=&r" (tmp)
           : [addr] "r" (addr), [new_val] "r" (new_val),
             [old_val] "r" (old_val)
           : "cc", "memory");
  return result;
}
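
// Illustrative note on the race documented above: if this thread is
// preempted after the first swp has stored new_val but before the
// swpne restores the previous value, another thread can transiently
// observe new_val in *addr even though this compare_and_swap will
// report failure.  The window is a few instructions wide, which is
// why the comment above calls the sequence "very close to atomic".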

inline static void
release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
{
  // Only a compiler barrier is emitted here: the SWP-based sequences
  // in this branch are in practice limited to uniprocessor targets,
  // where compiler ordering is sufficient.
  __asm__ __volatile__("" : : : "memory");
  *(addr) = new_val;
}

inline static bool
compare_and_swap_release(volatile obj_addr_t *addr,
                         obj_addr_t old,
                         obj_addr_t new_val)
{
  return compare_and_swap(addr, old, new_val);
}

// Ensure that subsequent instructions do not execute on stale
// data that was loaded from memory before the barrier.
inline static void
read_barrier()
{
  __asm__ __volatile__("" : : : "memory");
}

// Ensure that prior stores to memory are completed with respect to other
// processors.
inline static void
write_barrier()
{
  __asm__ __volatile__("" : : : "memory");
}

#endif
#endif