/*
 * Copyright © 2011 Ryan Lortie
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * licence, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Ryan Lortie <desrt@desrt.ca>
 */

#include "config.h"

#include "gatomic.h"
/**
 * SECTION:atomic_operations
 * @title: Atomic Operations
 * @short_description: basic atomic integer and pointer operations
 * @see_also: #GMutex
 *
 * The following is a collection of compiler macros to provide atomic
 * access to integer and pointer-sized values.
 *
 * The macros that have 'int' in the name will operate on pointers to
 * #gint and #guint.  The macros with 'pointer' in the name will operate
 * on pointers to any pointer-sized value, including #gsize.  There is
 * no support for 64-bit operations on platforms with 32-bit pointers
 * because it is not generally possible to perform these operations
 * atomically.
 *
 * The get, set and exchange operations for integers and pointers
 * nominally operate on #gint and #gpointer, respectively.  Of the
 * arithmetic operations, the 'add' operation operates on (and returns)
 * signed integer values (#gint and #gssize) and the 'and', 'or', and
 * 'xor' operations operate on (and return) unsigned integer values
 * (#guint and #gsize).
 *
 * All of the operations act as a full compiler and (where appropriate)
 * hardware memory barrier.  Acquire and release or producer and
 * consumer barrier semantics are not available through this API.
 *
 * It is very important that all accesses to a particular integer or
 * pointer be performed using only this API and that different sizes of
 * operation are not mixed or used on overlapping memory regions.
 * Never read or assign directly from or to a value -- always use this
 * API.
 *
 * For simple reference counting purposes you should use
 * g_atomic_int_inc() and g_atomic_int_dec_and_test().  Other uses that
 * fall outside of simple reference counting patterns are prone to
 * subtle bugs and occasionally undefined behaviour.  It is also worth
 * noting that since all of these operations require global
 * synchronisation of the entire machine, they can be quite slow.  In
 * the case of performing multiple atomic operations it can often be
 * faster to simply acquire a mutex lock around the critical area,
 * perform the operations normally and then release the lock.
 **/

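/* A sketch of the simple reference-counting pattern recommended above
 * (illustrative only; `Foo' and its helpers are hypothetical, not part
 * of this file):
 *
 *   typedef struct { gint ref_count; char *name; } Foo;
 *
 *   static Foo *
 *   foo_ref (Foo *foo)
 *   {
 *     g_atomic_int_inc (&foo->ref_count);
 *     return foo;
 *   }
 *
 *   static void
 *   foo_unref (Foo *foo)
 *   {
 *     // last-out thread sees the count hit zero and frees
 *     if (g_atomic_int_dec_and_test (&foo->ref_count))
 *       {
 *         g_free (foo->name);
 *         g_free (foo);
 *       }
 *   }
 */
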
/**
 * G_ATOMIC_LOCK_FREE:
 *
 * This macro is defined if the atomic operations of GLib are
 * implemented using real hardware atomic operations.  This means that
 * the GLib atomic API can be used between processes and safely mixed
 * with other (hardware) atomic APIs.
 *
 * If this macro is not defined, the atomic operations may be
 * emulated using a mutex.  In that case, the GLib atomic operations
 * are only atomic relative to themselves and within a single process.
 **/

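/* Code that relies on true hardware atomics -- for example a counter
 * placed in memory shared between processes -- can guard on the macro.
 * A sketch (`counter_in_shared_memory' is hypothetical):
 *
 *   #ifdef G_ATOMIC_LOCK_FREE
 *     g_atomic_int_inc (counter_in_shared_memory);
 *   #else
 *   #error "this code needs the GLib atomic API to be lock-free"
 *   #endif
 */
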
/* NOTE CAREFULLY:
 *
 * This file is the lowest-level part of GLib.
 *
 * Other low-level parts of GLib (threads, slice allocator, g_malloc,
 * messages, etc) call into these functions and macros to get work done.
 *
 * As such, these functions cannot call back into any part of GLib
 * without risking recursion.
 */

#ifdef G_ATOMIC_LOCK_FREE

/* if G_ATOMIC_LOCK_FREE was defined by ./configure then we MUST
 * implement the atomic operations in a lock-free manner.
 */

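/* Note on style, applying to all of the definitions below: the
 * function names are parenthesised, as in `(g_atomic_int_get)', so
 * that the corresponding function-like macro from gatomic.h is not
 * expanded at the point of definition, while the unparenthesised call
 * in each body does expand to the macro (and thus to the compiler
 * intrinsic).
 */
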
#if defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
/**
 * g_atomic_int_get:
 * @atomic: a pointer to a #gint or #guint
 *
 * Gets the current value of @atomic.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (before the get).
 *
 * Returns: the value of the integer
 *
 * Since: 2.4
 **/
gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  return g_atomic_int_get (atomic);
}

/**
 * g_atomic_int_set:
 * @atomic: a pointer to a #gint or #guint
 * @newval: a new value to store
 *
 * Sets the value of @atomic to @newval.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (after the set).
 *
 * Since: 2.4
 */
void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  g_atomic_int_set (atomic, newval);
}

/**
 * g_atomic_int_inc:
 * @atomic: a pointer to a #gint or #guint
 *
 * Increments the value of @atomic by 1.
 *
 * Think of this operation as an atomic version of `{ *atomic += 1; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Since: 2.4
 **/
void
(g_atomic_int_inc) (volatile gint *atomic)
{
  g_atomic_int_inc (atomic);
}

/**
 * g_atomic_int_dec_and_test:
 * @atomic: a pointer to a #gint or #guint
 *
 * Decrements the value of @atomic by 1.
 *
 * Think of this operation as an atomic version of
 * `{ *atomic -= 1; return (*atomic == 0); }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: %TRUE if the resultant value is zero
 *
 * Since: 2.4
 **/
gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  return g_atomic_int_dec_and_test (atomic);
}

/**
 * g_atomic_int_compare_and_exchange:
 * @atomic: a pointer to a #gint or #guint
 * @oldval: the value to compare with
 * @newval: the value to conditionally replace with
 *
 * Compares @atomic to @oldval and, if equal, sets it to @newval.
 * If @atomic was not equal to @oldval then no change occurs.
 *
 * This compare and exchange is done atomically.
 *
 * Think of this operation as an atomic version of
 * `{ if (*atomic == oldval) { *atomic = newval; return TRUE; } else return FALSE; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: %TRUE if the exchange took place
 *
 * Since: 2.4
 **/
gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  return g_atomic_int_compare_and_exchange (atomic, oldval, newval);
}

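/* A sketch of the classic compare-and-exchange retry loop built on the
 * primitive above (illustrative only; `atomic_max' is not part of this
 * file): atomically raise a shared maximum without losing races.
 *
 *   static void
 *   atomic_max (volatile gint *atomic,
 *               gint           candidate)
 *   {
 *     gint current;
 *
 *     // retry until the candidate is no longer larger, or we win the race
 *     do
 *       current = g_atomic_int_get (atomic);
 *     while (candidate > current &&
 *            !g_atomic_int_compare_and_exchange (atomic, current, candidate));
 *   }
 */
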
/**
 * g_atomic_int_add:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to add
 *
 * Atomically adds @val to the value of @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic += val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Before version 2.30, this function did not return a value
 * (but g_atomic_int_exchange_and_add() did, and had the same meaning).
 *
 * Returns: the value of @atomic before the add, signed
 *
 * Since: 2.4
 **/
gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  return g_atomic_int_add (atomic, val);
}

/**
 * g_atomic_int_and:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'and'
 *
 * Performs an atomic bitwise 'and' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic &= val; return tmp; }`.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  return g_atomic_int_and (atomic, val);
}

/**
 * g_atomic_int_or:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'or'
 *
 * Performs an atomic bitwise 'or' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic |= val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  return g_atomic_int_or (atomic, val);
}

/**
 * g_atomic_int_xor:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'xor'
 *
 * Performs an atomic bitwise 'xor' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic ^= val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  return g_atomic_int_xor (atomic, val);
}


/**
 * g_atomic_pointer_get:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 *
 * Gets the current value of @atomic.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (before the get).
 *
 * Returns: the value of the pointer
 *
 * Since: 2.4
 **/
gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  return g_atomic_pointer_get ((const volatile gpointer *) atomic);
}

/**
 * g_atomic_pointer_set:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 * @newval: a new value to store
 *
 * Sets the value of @atomic to @newval.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (after the set).
 *
 * Since: 2.4
 **/
void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  g_atomic_pointer_set ((volatile gpointer *) atomic, newval);
}

/**
 * g_atomic_pointer_compare_and_exchange:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 * @oldval: the value to compare with
 * @newval: the value to conditionally replace with
 *
 * Compares @atomic to @oldval and, if equal, sets it to @newval.
 * If @atomic was not equal to @oldval then no change occurs.
 *
 * This compare and exchange is done atomically.
 *
 * Think of this operation as an atomic version of
 * `{ if (*atomic == oldval) { *atomic = newval; return TRUE; } else return FALSE; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: %TRUE if the exchange took place
 *
 * Since: 2.4
 **/
gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  return g_atomic_pointer_compare_and_exchange ((volatile gpointer *) atomic,
                                                oldval, newval);
}

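/* A sketch of lock-free one-time initialisation using the pointer
 * primitives above (illustrative only; `get_cache' is hypothetical):
 * the first thread to install its pointer wins, and a losing thread
 * discards its copy and uses the winner's.
 *
 *   static GHashTable *
 *   get_cache (void)
 *   {
 *     static volatile gpointer cache = NULL;
 *
 *     if (g_atomic_pointer_get (&cache) == NULL)
 *       {
 *         GHashTable *mine = g_hash_table_new (g_str_hash, g_str_equal);
 *
 *         // only succeeds for exactly one thread
 *         if (!g_atomic_pointer_compare_and_exchange (&cache, NULL, mine))
 *           g_hash_table_unref (mine);
 *       }
 *
 *     return g_atomic_pointer_get (&cache);
 *   }
 */
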
/**
 * g_atomic_pointer_add:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 * @val: the value to add
 *
 * Atomically adds @val to the value of @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic += val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the add, signed
 *
 * Since: 2.30
 **/
gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
  return g_atomic_pointer_add ((volatile gpointer *) atomic, val);
}

/**
 * g_atomic_pointer_and:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 * @val: the value to 'and'
 *
 * Performs an atomic bitwise 'and' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic &= val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
  return g_atomic_pointer_and ((volatile gpointer *) atomic, val);
}

/**
 * g_atomic_pointer_or:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 * @val: the value to 'or'
 *
 * Performs an atomic bitwise 'or' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic |= val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
  return g_atomic_pointer_or ((volatile gpointer *) atomic, val);
}

/**
 * g_atomic_pointer_xor:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 * @val: the value to 'xor'
 *
 * Performs an atomic bitwise 'xor' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic ^= val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
  return g_atomic_pointer_xor ((volatile gpointer *) atomic, val);
}

#elif defined (G_PLATFORM_WIN32)

#include <windows.h>
#if !defined(_M_AMD64) && !defined (_M_IA64) && !defined(_M_X64) && !(defined _MSC_VER && _MSC_VER <= 1200)
#define InterlockedAnd _InterlockedAnd
#define InterlockedOr _InterlockedOr
#define InterlockedXor _InterlockedXor
#endif

#if !defined (_MSC_VER) || _MSC_VER <= 1200
#include "gmessages.h"
/* Inlined versions for older compiler */
static LONG
_gInterlockedAnd (volatile guint *atomic,
                  guint           val)
{
  LONG i, j;

  j = *atomic;
  do {
    i = j;
    j = InterlockedCompareExchange(atomic, i & val, i);
  } while (i != j);

  return j;
}
#define InterlockedAnd(a,b) _gInterlockedAnd(a,b)
static LONG
_gInterlockedOr (volatile guint *atomic,
                 guint           val)
{
  LONG i, j;

  j = *atomic;
  do {
    i = j;
    j = InterlockedCompareExchange(atomic, i | val, i);
  } while (i != j);

  return j;
}
#define InterlockedOr(a,b) _gInterlockedOr(a,b)
static LONG
_gInterlockedXor (volatile guint *atomic,
                  guint           val)
{
  LONG i, j;

  j = *atomic;
  do {
    i = j;
    j = InterlockedCompareExchange(atomic, i ^ val, i);
  } while (i != j);

  return j;
}
#define InterlockedXor(a,b) _gInterlockedXor(a,b)
#endif

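/* Each of the emulations above follows the compare-and-swap retry
 * pattern: read the current value, compute the new value, and attempt
 * to publish it with InterlockedCompareExchange(); if another thread
 * changed the value in the meantime the exchange fails and the loop
 * retries with the freshly observed value.  The function returns the
 * value seen immediately before the successful exchange, matching the
 * Interlocked* return convention.
 */
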
/*
 * http://msdn.microsoft.com/en-us/library/ms684122(v=vs.85).aspx
 */
gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  MemoryBarrier ();
  return *atomic;
}

void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  *atomic = newval;
  MemoryBarrier ();
}

void
(g_atomic_int_inc) (volatile gint *atomic)
{
  InterlockedIncrement (atomic);
}

gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  return InterlockedDecrement (atomic) == 0;
}

gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  return InterlockedCompareExchange (atomic, newval, oldval) == oldval;
}

gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  return InterlockedExchangeAdd (atomic, val);
}

guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  return InterlockedAnd (atomic, val);
}

guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  return InterlockedOr (atomic, val);
}

guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  return InterlockedXor (atomic, val);
}


gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  const volatile gpointer *ptr = atomic;

  MemoryBarrier ();
  return *ptr;
}

void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  volatile gpointer *ptr = atomic;

  *ptr = newval;
  MemoryBarrier ();
}

gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  return InterlockedCompareExchangePointer (atomic, newval, oldval) == oldval;
}

gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedExchangeAdd64 (atomic, val);
#else
  return InterlockedExchangeAdd (atomic, val);
#endif
}

gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedAnd64 (atomic, val);
#else
  return InterlockedAnd (atomic, val);
#endif
}

gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedOr64 (atomic, val);
#else
  return InterlockedOr (atomic, val);
#endif
}

gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedXor64 (atomic, val);
#else
  return InterlockedXor (atomic, val);
#endif
}

#else

/* This error occurs when ./configure decided that we should be capable
 * of lock-free atomics but we find at compile-time that we are not.
 */
#error G_ATOMIC_LOCK_FREE defined, but incapable of lock-free atomics.

#endif /* defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) */

#else /* G_ATOMIC_LOCK_FREE */

/* We are not permitted to call into any GLib functions from here, so we
 * cannot use GMutex.
 *
 * Fortunately, we already take care of the Windows case above, and all
 * non-Windows platforms on which glib runs have pthreads.  Use those.
 */
#include <pthread.h>

static pthread_mutex_t g_atomic_lock = PTHREAD_MUTEX_INITIALIZER;

gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  gint value;

  pthread_mutex_lock (&g_atomic_lock);
  value = *atomic;
  pthread_mutex_unlock (&g_atomic_lock);

  return value;
}

void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           value)
{
  pthread_mutex_lock (&g_atomic_lock);
  *atomic = value;
  pthread_mutex_unlock (&g_atomic_lock);
}

void
(g_atomic_int_inc) (volatile gint *atomic)
{
  pthread_mutex_lock (&g_atomic_lock);
  (*atomic)++;
  pthread_mutex_unlock (&g_atomic_lock);
}

gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  gboolean is_zero;

  pthread_mutex_lock (&g_atomic_lock);
  is_zero = --(*atomic) == 0;
  pthread_mutex_unlock (&g_atomic_lock);

  return is_zero;
}

gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  gboolean success;

  pthread_mutex_lock (&g_atomic_lock);

  if ((success = (*atomic == oldval)))
    *atomic = newval;

  pthread_mutex_unlock (&g_atomic_lock);

  return success;
}

gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  gint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval + val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval & val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval | val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval ^ val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}


gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  const volatile gpointer *ptr = atomic;
  gpointer value;

  pthread_mutex_lock (&g_atomic_lock);
  value = *ptr;
  pthread_mutex_unlock (&g_atomic_lock);

  return value;
}

void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  volatile gpointer *ptr = atomic;

  pthread_mutex_lock (&g_atomic_lock);
  *ptr = newval;
  pthread_mutex_unlock (&g_atomic_lock);
}

gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  volatile gpointer *ptr = atomic;
  gboolean success;

  pthread_mutex_lock (&g_atomic_lock);

  if ((success = (*ptr == oldval)))
    *ptr = newval;

  pthread_mutex_unlock (&g_atomic_lock);

  return success;
}

gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
  volatile gssize *ptr = atomic;
  gssize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval + val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval & val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval | val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval ^ val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

#endif

/**
 * g_atomic_int_exchange_and_add:
 * @atomic: a pointer to a #gint
 * @val: the value to add
 *
 * This function existed before g_atomic_int_add() returned the prior
 * value of the integer (which it now does).  It is retained only for
 * compatibility reasons.  Don't use this function in new code.
 *
 * Returns: the value of @atomic before the add, signed
 * Since: 2.4
 * Deprecated: 2.30: Use g_atomic_int_add() instead.
 **/
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  return (g_atomic_int_add) (atomic, val);
}