/*
 * Copyright (C) 2007 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cutils/atomic.h>
#ifdef HAVE_WIN32_THREADS
#include <windows.h>
#else
#include <sched.h>
#endif
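
/* Convention used throughout this file: android_atomic_cmpxchg() and its
 * 64-bit counterpart return 0 on success and nonzero on failure (the
 * opposite of the usual C boolean sense), which is why the retry loops
 * below spin while the call returns nonzero.  The read-modify-write
 * operations (inc/dec/add/and/or/swap) all return the value the location
 * held *before* the operation. */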

/*****************************************************************************/
#if defined(HAVE_MACOSX_IPC)

#include <libkern/OSAtomic.h>

void android_atomic_write(int32_t value, volatile int32_t* addr) {
    int32_t oldValue;
    do {
        oldValue = *addr;
    } while (OSAtomicCompareAndSwap32Barrier(oldValue, value, (int32_t*)addr) == 0);
}
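
/* OSAtomicIncrement32Barrier() and friends return the *new* value, while
 * the android_atomic_* functions return the previous value, hence the
 * -1/+1/-value corrections below. */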

int32_t android_atomic_inc(volatile int32_t* addr) {
    return OSAtomicIncrement32Barrier((int32_t*)addr) - 1;
}

int32_t android_atomic_dec(volatile int32_t* addr) {
    return OSAtomicDecrement32Barrier((int32_t*)addr) + 1;
}

int32_t android_atomic_add(int32_t value, volatile int32_t* addr) {
    return OSAtomicAdd32Barrier(value, (int32_t*)addr) - value;
}

int32_t android_atomic_and(int32_t value, volatile int32_t* addr) {
    int32_t oldValue;
    do {
        oldValue = *addr;
    } while (OSAtomicCompareAndSwap32Barrier(oldValue, oldValue & value, (int32_t*)addr) == 0);
    return oldValue;
}

int32_t android_atomic_or(int32_t value, volatile int32_t* addr) {
    int32_t oldValue;
    do {
        oldValue = *addr;
    } while (OSAtomicCompareAndSwap32Barrier(oldValue, oldValue | value, (int32_t*)addr) == 0);
    return oldValue;
}

int32_t android_atomic_swap(int32_t value, volatile int32_t* addr) {
    int32_t oldValue;
    do {
        oldValue = *addr;
    } while (android_atomic_cmpxchg(oldValue, value, addr));
    return oldValue;
}

int android_atomic_cmpxchg(int32_t oldvalue, int32_t newvalue, volatile int32_t* addr) {
    return OSAtomicCompareAndSwap32Barrier(oldvalue, newvalue, (int32_t*)addr) == 0;
}
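
/* Illustrative sketch (not part of this file's API): further read-modify-write
 * primitives can be layered on android_atomic_cmpxchg() with the same retry
 * loop used above.  For example, a hypothetical atomic maximum:
 *
 *     int32_t atomic_max(int32_t value, volatile int32_t* addr) {
 *         int32_t oldValue;
 *         do {
 *             oldValue = *addr;
 *         } while (oldValue < value
 *                  && android_atomic_cmpxchg(oldValue, value, addr));
 *         return oldValue;
 *     }
 */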

#if defined(__ppc__) \
    || defined(__PPC__) \
    || defined(__powerpc__) \
    || defined(__powerpc) \
    || defined(__POWERPC__) \
    || defined(_M_PPC) \
    || defined(__PPC)
#define NEED_QUASIATOMICS 1
#else

int android_quasiatomic_cmpxchg_64(int64_t oldvalue, int64_t newvalue,
        volatile int64_t* addr) {
    return OSAtomicCompareAndSwap64Barrier(oldvalue, newvalue,
            (int64_t*)addr) == 0;
}

int64_t android_quasiatomic_swap_64(int64_t value, volatile int64_t* addr) {
    int64_t oldValue;
    do {
        oldValue = *addr;
    } while (android_quasiatomic_cmpxchg_64(oldValue, value, addr));
    return oldValue;
}

int64_t android_quasiatomic_read_64(volatile int64_t* addr) {
    /* Atomically adding zero yields the current value along with the
     * required memory barrier. */
    return OSAtomicAdd64Barrier(0, addr);
}

#endif


/*****************************************************************************/
#elif defined(__i386__) || defined(__x86_64__)

void android_atomic_write(int32_t value, volatile int32_t* addr) {
    int32_t oldValue;
    do {
        oldValue = *addr;
    } while (android_atomic_cmpxchg(oldValue, value, addr));
}

int32_t android_atomic_inc(volatile int32_t* addr) {
    int32_t oldValue;
    do {
        oldValue = *addr;
    } while (android_atomic_cmpxchg(oldValue, oldValue + 1, addr));
    return oldValue;
}

int32_t android_atomic_dec(volatile int32_t* addr) {
    int32_t oldValue;
    do {
        oldValue = *addr;
    } while (android_atomic_cmpxchg(oldValue, oldValue - 1, addr));
    return oldValue;
}

int32_t android_atomic_add(int32_t value, volatile int32_t* addr) {
    int32_t oldValue;
    do {
        oldValue = *addr;
    } while (android_atomic_cmpxchg(oldValue, oldValue + value, addr));
    return oldValue;
}

int32_t android_atomic_and(int32_t value, volatile int32_t* addr) {
    int32_t oldValue;
    do {
        oldValue = *addr;
    } while (android_atomic_cmpxchg(oldValue, oldValue & value, addr));
    return oldValue;
}

int32_t android_atomic_or(int32_t value, volatile int32_t* addr) {
    int32_t oldValue;
    do {
        oldValue = *addr;
    } while (android_atomic_cmpxchg(oldValue, oldValue | value, addr));
    return oldValue;
}

int32_t android_atomic_swap(int32_t value, volatile int32_t* addr) {
    int32_t oldValue;
    do {
        oldValue = *addr;
    } while (android_atomic_cmpxchg(oldValue, value, addr));
    return oldValue;
}

int android_atomic_cmpxchg(int32_t oldvalue, int32_t newvalue, volatile int32_t* addr) {
    int32_t prev;
    /* "lock; cmpxchgl" compares %eax (oldvalue) with *addr: on a match it
     * stores newvalue, otherwise it loads the current value into %eax.
     * The "m" operand works with both 32-bit and 64-bit pointers, and the
     * "memory" clobber keeps the compiler from reordering memory accesses
     * around the operation. */
    __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
                          : "=a" (prev)
                          : "q" (newvalue), "m" (*addr), "0" (oldvalue)
                          : "memory");
    return prev != oldvalue;
}

#define NEED_QUASIATOMICS 1

/*****************************************************************************/
#elif __arm__
// Most of the implementation is in atomic-android-arm.s.

// On the device, we implement the 64-bit atomic operations through
// mutex locking.  Normally this is bad, because a pthread_mutex_t must
// be initialized before it can be used, which would force an
// initialization check into every function call, and that's where the
// really ugly things begin...
//
// BUT, as a special twist, we take advantage of the fact that in our
// pthread library a mutex is simply a volatile word whose value is always
// initialized to 0.  In other words, simply declaring a static mutex
// object initializes it!
//
// Another twist is that we use a small array of mutexes to dispatch
// the contention locks from different memory addresses.
//
#include <pthread.h>

#define SWAP_LOCK_COUNT 32U
static pthread_mutex_t _swap_locks[SWAP_LOCK_COUNT];

#define SWAP_LOCK(addr) \
    &_swap_locks[((unsigned)(void*)(addr) >> 3U) % SWAP_LOCK_COUNT]
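
// Shifting the address right by 3 makes every 8-byte-aligned int64_t map
// to a single lock slot, and the modulo then scatters distinct addresses
// across the 32 locks so that unrelated 64-bit variables rarely share a
// mutex.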

int64_t android_quasiatomic_swap_64(int64_t value, volatile int64_t* addr) {
    int64_t oldValue;
    pthread_mutex_t* lock = SWAP_LOCK(addr);

    pthread_mutex_lock(lock);

    oldValue = *addr;
    *addr = value;

    pthread_mutex_unlock(lock);
    return oldValue;
}

int android_quasiatomic_cmpxchg_64(int64_t oldvalue, int64_t newvalue,
        volatile int64_t* addr) {
    int result;
    pthread_mutex_t* lock = SWAP_LOCK(addr);

    pthread_mutex_lock(lock);

    if (*addr == oldvalue) {
        *addr = newvalue;
        result = 0;
    } else {
        result = 1;
    }
    pthread_mutex_unlock(lock);
    return result;
}

int64_t android_quasiatomic_read_64(volatile int64_t* addr) {
    int64_t result;
    pthread_mutex_t* lock = SWAP_LOCK(addr);

    pthread_mutex_lock(lock);
    result = *addr;
    pthread_mutex_unlock(lock);
    return result;
}
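
// Illustrative sketch (not part of this file's API): richer 64-bit
// read-modify-write operations can be composed from the cmpxchg primitive
// with the usual retry loop, e.g. a hypothetical 64-bit add:
//
//     int64_t quasiatomic_add_64(int64_t value, volatile int64_t* addr) {
//         int64_t oldValue;
//         do {
//             oldValue = android_quasiatomic_read_64(addr);
//         } while (android_quasiatomic_cmpxchg_64(oldValue,
//                                                 oldValue + value, addr));
//         return oldValue;
//     }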

#else

#error "Unsupported atomic operations for this platform"

#endif



#if NEED_QUASIATOMICS

/* Note that a spinlock is *not* a good idea in general,
 * since spinlocks can introduce subtle issues.  For example,
 * a real-time thread trying to acquire a spinlock already
 * held by another thread will never yield, so the CPU
 * spins endlessly!
 *
 * However, this code is only used on the Linux simulator,
 * so it's probably acceptable for us.
 *
 * The alternative is a pthread mutex, but mutexes must be
 * initialized before being used, and then you have the
 * problem of lazily initializing a mutex without any other
 * synchronization primitive.
 */

/* Global spinlock for all 64-bit quasiatomic operations. */
static int32_t quasiatomic_spinlock = 0;
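
/* The lock protocol below: android_atomic_cmpxchg(0, 1, &lock) returns 0
 * only for the thread that flips the word from 0 to 1, so every other
 * thread keeps yielding, and android_atomic_swap(0, &lock) releases the
 * lock. */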

int android_quasiatomic_cmpxchg_64(int64_t oldvalue, int64_t newvalue,
        volatile int64_t* addr) {
    int result;

    while (android_atomic_cmpxchg(0, 1, &quasiatomic_spinlock)) {
#ifdef HAVE_WIN32_THREADS
        Sleep(0);
#else
        sched_yield();
#endif
    }

    if (*addr == oldvalue) {
        *addr = newvalue;
        result = 0;
    } else {
        result = 1;
    }

    android_atomic_swap(0, &quasiatomic_spinlock);

    return result;
}

int64_t android_quasiatomic_read_64(volatile int64_t* addr) {
    int64_t result;

    while (android_atomic_cmpxchg(0, 1, &quasiatomic_spinlock)) {
#ifdef HAVE_WIN32_THREADS
        Sleep(0);
#else
        sched_yield();
#endif
    }

    result = *addr;
    android_atomic_swap(0, &quasiatomic_spinlock);

    return result;
}

int64_t android_quasiatomic_swap_64(int64_t value, volatile int64_t* addr) {
    int64_t result;

    while (android_atomic_cmpxchg(0, 1, &quasiatomic_spinlock)) {
#ifdef HAVE_WIN32_THREADS
        Sleep(0);
#else
        sched_yield();
#endif
    }

    result = *addr;
    *addr = value;
    android_atomic_swap(0, &quasiatomic_spinlock);

    return result;
}

#endif