nexmon – Blame information for rev 1
?pathlinks?
Rev | Author | Line No. | Line |
---|---|---|---|
1 | office | 1 | /* GLIB - Library of useful routines for C programming |
2 | * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald |
||
3 | * |
||
4 | * gthread.c: posix thread system implementation |
||
5 | * Copyright 1998 Sebastian Wilhelmi; University of Karlsruhe |
||
6 | * |
||
7 | * This library is free software; you can redistribute it and/or |
||
8 | * modify it under the terms of the GNU Lesser General Public |
||
9 | * License as published by the Free Software Foundation; either |
||
10 | * version 2 of the License, or (at your option) any later version. |
||
11 | * |
||
12 | * This library is distributed in the hope that it will be useful, |
||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
||
15 | * Lesser General Public License for more details. |
||
16 | * |
||
17 | * You should have received a copy of the GNU Lesser General Public |
||
18 | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
||
19 | */ |
||
20 | |||
21 | /* |
||
22 | * Modified by the GLib Team and others 1997-2000. See the AUTHORS |
||
23 | * file for a list of people on the GLib Team. See the ChangeLog |
||
24 | * files for a list of changes. These files are distributed with |
||
25 | * GLib at ftp://ftp.gtk.org/pub/gtk/. |
||
26 | */ |
||
27 | |||
28 | /* The GMutex, GCond and GPrivate implementations in this file are some |
||
29 | * of the lowest-level code in GLib. All other parts of GLib (messages, |
||
30 | * memory, slices, etc) assume that they can freely use these facilities |
||
31 | * without risking recursion. |
||
32 | * |
||
33 | * As such, these functions are NOT permitted to call any other part of |
||
34 | * GLib. |
||
35 | * |
||
36 | * The thread manipulation functions (create, exit, join, etc.) have |
||
37 | * more freedom -- they can do as they please. |
||
38 | */ |
||
39 | |||
40 | #include "config.h" |
||
41 | |||
42 | #include "gthread.h" |
||
43 | |||
44 | #include "gthreadprivate.h" |
||
45 | #include "gslice.h" |
||
46 | #include "gmessages.h" |
||
47 | #include "gstrfuncs.h" |
||
48 | #include "gmain.h" |
||
49 | |||
50 | #include <stdlib.h> |
||
51 | #include <stdio.h> |
||
52 | #include <string.h> |
||
53 | #include <errno.h> |
||
54 | #include <pthread.h> |
||
55 | |||
56 | #include <sys/time.h> |
||
57 | #include <unistd.h> |
||
58 | |||
59 | #ifdef HAVE_SCHED_H |
||
60 | #include <sched.h> |
||
61 | #endif |
||
62 | #ifdef G_OS_WIN32 |
||
63 | #include <windows.h> |
||
64 | #endif |
||
65 | |||
66 | /* clang defines __ATOMIC_SEQ_CST but doesn't support the GCC extension */ |
||
67 | #if defined(HAVE_FUTEX) && defined(__ATOMIC_SEQ_CST) && !defined(__clang__) |
||
68 | #define USE_NATIVE_MUTEX |
||
69 | #endif |
||
70 | |||
71 | static void |
||
72 | g_thread_abort (gint status, |
||
73 | const gchar *function) |
||
74 | { |
||
75 | fprintf (stderr, "GLib (gthread-posix.c): Unexpected error from C library during '%s': %s. Aborting.\n", |
||
76 | function, strerror (status)); |
||
77 | abort (); |
||
78 | } |
||
79 | |||
80 | /* {{{1 GMutex */ |
||
81 | |||
82 | #if !defined(USE_NATIVE_MUTEX) |
||
83 | |||
84 | static pthread_mutex_t * |
||
85 | g_mutex_impl_new (void) |
||
86 | { |
||
87 | pthread_mutexattr_t *pattr = NULL; |
||
88 | pthread_mutex_t *mutex; |
||
89 | gint status; |
||
90 | #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP |
||
91 | pthread_mutexattr_t attr; |
||
92 | #endif |
||
93 | |||
94 | mutex = malloc (sizeof (pthread_mutex_t)); |
||
95 | if G_UNLIKELY (mutex == NULL) |
||
96 | g_thread_abort (errno, "malloc"); |
||
97 | |||
98 | #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP |
||
99 | pthread_mutexattr_init (&attr); |
||
100 | pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_ADAPTIVE_NP); |
||
101 | pattr = &attr; |
||
102 | #endif |
||
103 | |||
104 | if G_UNLIKELY ((status = pthread_mutex_init (mutex, pattr)) != 0) |
||
105 | g_thread_abort (status, "pthread_mutex_init"); |
||
106 | |||
107 | #ifdef PTHREAD_ADAPTIVE_MUTEX_NP |
||
108 | pthread_mutexattr_destroy (&attr); |
||
109 | #endif |
||
110 | |||
111 | return mutex; |
||
112 | } |
||
113 | |||
/* Tear down and release a pthread mutex obtained from
 * g_mutex_impl_new(). */
static void
g_mutex_impl_free (pthread_mutex_t *impl)
{
  pthread_mutex_destroy (impl);
  free (impl);
}
||
120 | |||
/* Return the pthread mutex backing @mutex, creating it on first use.
 *
 * A statically-allocated GMutex is all-zeros, so the first caller sees
 * a NULL pointer and must allocate the real mutex.  Multiple threads
 * may race to do so: each allocates a candidate, but only the thread
 * whose compare-and-exchange installs its pointer wins; losers free
 * their candidate and adopt the winner's.
 */
static inline pthread_mutex_t *
g_mutex_get_impl (GMutex *mutex)
{
  pthread_mutex_t *impl = g_atomic_pointer_get (&mutex->p);

  if G_UNLIKELY (impl == NULL)
    {
      impl = g_mutex_impl_new ();
      if (!g_atomic_pointer_compare_and_exchange (&mutex->p, NULL, impl))
        g_mutex_impl_free (impl);
      /* Re-read: either our pointer or the one another thread installed. */
      impl = mutex->p;
    }

  return impl;
}
||
136 | |||
137 | |||
138 | /** |
||
139 | * g_mutex_init: |
||
140 | * @mutex: an uninitialized #GMutex |
||
141 | * |
||
142 | * Initializes a #GMutex so that it can be used. |
||
143 | * |
||
144 | * This function is useful to initialize a mutex that has been |
||
145 | * allocated on the stack, or as part of a larger structure. |
||
146 | * It is not necessary to initialize a mutex that has been |
||
147 | * statically allocated. |
||
148 | * |
||
149 | * |[<!-- language="C" --> |
||
150 | * typedef struct { |
||
151 | * GMutex m; |
||
152 | * ... |
||
153 | * } Blob; |
||
154 | * |
||
155 | * Blob *b; |
||
156 | * |
||
157 | * b = g_new (Blob, 1); |
||
158 | * g_mutex_init (&b->m); |
||
159 | * ]| |
||
160 | * |
||
161 | * To undo the effect of g_mutex_init() when a mutex is no longer |
||
162 | * needed, use g_mutex_clear(). |
||
163 | * |
||
164 | * Calling g_mutex_init() on an already initialized #GMutex leads |
||
165 | * to undefined behaviour. |
||
166 | * |
||
167 | * Since: 2.32 |
||
168 | */ |
||
void
g_mutex_init (GMutex *mutex)
{
  /* Eagerly allocate the backing pthread mutex; statically-allocated
   * GMutexes are instead initialised lazily via g_mutex_get_impl(). */
  mutex->p = g_mutex_impl_new ();
}
||
174 | |||
175 | /** |
||
176 | * g_mutex_clear: |
||
177 | * @mutex: an initialized #GMutex |
||
178 | * |
||
179 | * Frees the resources allocated to a mutex with g_mutex_init(). |
||
180 | * |
||
181 | * This function should not be used with a #GMutex that has been |
||
182 | * statically allocated. |
||
183 | * |
||
184 | * Calling g_mutex_clear() on a locked mutex leads to undefined |
||
185 | * behaviour. |
||
186 | * |
||
187 | * Since: 2.32 |
||
188 | */ |
||
void
g_mutex_clear (GMutex *mutex)
{
  /* Destroy and free the backing pthread mutex allocated by
   * g_mutex_init() (or lazily by g_mutex_get_impl()).  The mutex must
   * be unlocked. */
  g_mutex_impl_free (mutex->p);
}
||
194 | |||
195 | /** |
||
196 | * g_mutex_lock: |
||
197 | * @mutex: a #GMutex |
||
198 | * |
||
199 | * Locks @mutex. If @mutex is already locked by another thread, the |
||
200 | * current thread will block until @mutex is unlocked by the other |
||
201 | * thread. |
||
202 | * |
||
203 | * #GMutex is neither guaranteed to be recursive nor to be |
||
204 | * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has |
||
205 | * already been locked by the same thread results in undefined behaviour |
||
206 | * (including but not limited to deadlocks). |
||
207 | */ |
||
208 | void |
||
209 | g_mutex_lock (GMutex *mutex) |
||
210 | { |
||
211 | gint status; |
||
212 | |||
213 | if G_UNLIKELY ((status = pthread_mutex_lock (g_mutex_get_impl (mutex))) != 0) |
||
214 | g_thread_abort (status, "pthread_mutex_lock"); |
||
215 | } |
||
216 | |||
217 | /** |
||
218 | * g_mutex_unlock: |
||
219 | * @mutex: a #GMutex |
||
220 | * |
||
221 | * Unlocks @mutex. If another thread is blocked in a g_mutex_lock() |
||
222 | * call for @mutex, it will become unblocked and can lock @mutex itself. |
||
223 | * |
||
224 | * Calling g_mutex_unlock() on a mutex that is not locked by the |
||
225 | * current thread leads to undefined behaviour. |
||
226 | */ |
||
227 | void |
||
228 | g_mutex_unlock (GMutex *mutex) |
||
229 | { |
||
230 | gint status; |
||
231 | |||
232 | if G_UNLIKELY ((status = pthread_mutex_unlock (g_mutex_get_impl (mutex))) != 0) |
||
233 | g_thread_abort (status, "pthread_mutex_unlock"); |
||
234 | } |
||
235 | |||
236 | /** |
||
237 | * g_mutex_trylock: |
||
238 | * @mutex: a #GMutex |
||
239 | * |
||
240 | * Tries to lock @mutex. If @mutex is already locked by another thread, |
||
241 | * it immediately returns %FALSE. Otherwise it locks @mutex and returns |
||
242 | * %TRUE. |
||
243 | * |
||
244 | * #GMutex is neither guaranteed to be recursive nor to be |
||
245 | * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has |
||
246 | * already been locked by the same thread results in undefined behaviour |
||
247 | * (including but not limited to deadlocks or arbitrary return values). |
||
248 | |||
249 | * Returns: %TRUE if @mutex could be locked |
||
250 | */ |
||
251 | gboolean |
||
252 | g_mutex_trylock (GMutex *mutex) |
||
253 | { |
||
254 | gint status; |
||
255 | |||
256 | if G_LIKELY ((status = pthread_mutex_trylock (g_mutex_get_impl (mutex))) == 0) |
||
257 | return TRUE; |
||
258 | |||
259 | if G_UNLIKELY (status != EBUSY) |
||
260 | g_thread_abort (status, "pthread_mutex_trylock"); |
||
261 | |||
262 | return FALSE; |
||
263 | } |
||
264 | |||
265 | #endif /* !defined(USE_NATIVE_MUTEX) */ |
||
266 | |||
267 | /* {{{1 GRecMutex */ |
||
268 | |||
269 | static pthread_mutex_t * |
||
270 | g_rec_mutex_impl_new (void) |
||
271 | { |
||
272 | pthread_mutexattr_t attr; |
||
273 | pthread_mutex_t *mutex; |
||
274 | |||
275 | mutex = malloc (sizeof (pthread_mutex_t)); |
||
276 | if G_UNLIKELY (mutex == NULL) |
||
277 | g_thread_abort (errno, "malloc"); |
||
278 | |||
279 | pthread_mutexattr_init (&attr); |
||
280 | pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE); |
||
281 | pthread_mutex_init (mutex, &attr); |
||
282 | pthread_mutexattr_destroy (&attr); |
||
283 | |||
284 | return mutex; |
||
285 | } |
||
286 | |||
287 | static void |
||
288 | g_rec_mutex_impl_free (pthread_mutex_t *mutex) |
||
289 | { |
||
290 | pthread_mutex_destroy (mutex); |
||
291 | free (mutex); |
||
292 | } |
||
293 | |||
/* Return the recursive pthread mutex backing @rec_mutex, creating it
 * on first use.  Same lock-free first-use pattern as
 * g_mutex_get_impl(): racing threads each allocate a candidate, the
 * compare-and-exchange winner's pointer is kept, losers free theirs. */
static inline pthread_mutex_t *
g_rec_mutex_get_impl (GRecMutex *rec_mutex)
{
  pthread_mutex_t *impl = g_atomic_pointer_get (&rec_mutex->p);

  if G_UNLIKELY (impl == NULL)
    {
      impl = g_rec_mutex_impl_new ();
      if (!g_atomic_pointer_compare_and_exchange (&rec_mutex->p, NULL, impl))
        g_rec_mutex_impl_free (impl);
      /* Re-read: ours, or the one another thread installed first. */
      impl = rec_mutex->p;
    }

  return impl;
}
||
309 | |||
310 | /** |
||
311 | * g_rec_mutex_init: |
||
312 | * @rec_mutex: an uninitialized #GRecMutex |
||
313 | * |
||
314 | * Initializes a #GRecMutex so that it can be used. |
||
315 | * |
||
316 | * This function is useful to initialize a recursive mutex |
||
317 | * that has been allocated on the stack, or as part of a larger |
||
318 | * structure. |
||
319 | * |
||
320 | * It is not necessary to initialise a recursive mutex that has been |
||
321 | * statically allocated. |
||
322 | * |
||
323 | * |[<!-- language="C" --> |
||
324 | * typedef struct { |
||
325 | * GRecMutex m; |
||
326 | * ... |
||
327 | * } Blob; |
||
328 | * |
||
329 | * Blob *b; |
||
330 | * |
||
331 | * b = g_new (Blob, 1); |
||
332 | * g_rec_mutex_init (&b->m); |
||
333 | * ]| |
||
334 | * |
||
335 | * Calling g_rec_mutex_init() on an already initialized #GRecMutex |
||
336 | * leads to undefined behaviour. |
||
337 | * |
||
338 | * To undo the effect of g_rec_mutex_init() when a recursive mutex |
||
339 | * is no longer needed, use g_rec_mutex_clear(). |
||
340 | * |
||
341 | * Since: 2.32 |
||
342 | */ |
||
void
g_rec_mutex_init (GRecMutex *rec_mutex)
{
  /* Eagerly allocate the backing recursive pthread mutex. */
  rec_mutex->p = g_rec_mutex_impl_new ();
}
||
348 | |||
349 | /** |
||
350 | * g_rec_mutex_clear: |
||
351 | * @rec_mutex: an initialized #GRecMutex |
||
352 | * |
||
353 | * Frees the resources allocated to a recursive mutex with |
||
354 | * g_rec_mutex_init(). |
||
355 | * |
||
356 | * This function should not be used with a #GRecMutex that has been |
||
357 | * statically allocated. |
||
358 | * |
||
359 | * Calling g_rec_mutex_clear() on a locked recursive mutex leads |
||
360 | * to undefined behaviour. |
||
361 | * |
||
362 | * Since: 2.32 |
||
363 | */ |
||
void
g_rec_mutex_clear (GRecMutex *rec_mutex)
{
  /* Destroy and free the backing recursive pthread mutex.  The mutex
   * must be fully unlocked. */
  g_rec_mutex_impl_free (rec_mutex->p);
}
||
369 | |||
370 | /** |
||
371 | * g_rec_mutex_lock: |
||
372 | * @rec_mutex: a #GRecMutex |
||
373 | * |
||
374 | * Locks @rec_mutex. If @rec_mutex is already locked by another |
||
375 | * thread, the current thread will block until @rec_mutex is |
||
376 | * unlocked by the other thread. If @rec_mutex is already locked |
||
377 | * by the current thread, the 'lock count' of @rec_mutex is increased. |
||
378 | * The mutex will only become available again when it is unlocked |
||
379 | * as many times as it has been locked. |
||
380 | * |
||
381 | * Since: 2.32 |
||
382 | */ |
||
void
g_rec_mutex_lock (GRecMutex *mutex)
{
  /* The return value is deliberately ignored here, unlike in
   * g_mutex_lock() which aborts on error. */
  pthread_mutex_lock (g_rec_mutex_get_impl (mutex));
}
||
388 | |||
389 | /** |
||
390 | * g_rec_mutex_unlock: |
||
391 | * @rec_mutex: a #GRecMutex |
||
392 | * |
||
393 | * Unlocks @rec_mutex. If another thread is blocked in a |
||
394 | * g_rec_mutex_lock() call for @rec_mutex, it will become unblocked |
||
395 | * and can lock @rec_mutex itself. |
||
396 | * |
||
397 | * Calling g_rec_mutex_unlock() on a recursive mutex that is not |
||
398 | * locked by the current thread leads to undefined behaviour. |
||
399 | * |
||
400 | * Since: 2.32 |
||
401 | */ |
||
void
g_rec_mutex_unlock (GRecMutex *rec_mutex)
{
  /* Reads rec_mutex->p directly rather than via g_rec_mutex_get_impl():
   * unlocking is only valid after a prior lock, so the backing mutex
   * must already exist. */
  pthread_mutex_unlock (rec_mutex->p);
}
||
407 | |||
408 | /** |
||
409 | * g_rec_mutex_trylock: |
||
410 | * @rec_mutex: a #GRecMutex |
||
411 | * |
||
412 | * Tries to lock @rec_mutex. If @rec_mutex is already locked |
||
413 | * by another thread, it immediately returns %FALSE. Otherwise |
||
414 | * it locks @rec_mutex and returns %TRUE. |
||
415 | * |
||
416 | * Returns: %TRUE if @rec_mutex could be locked |
||
417 | * |
||
418 | * Since: 2.32 |
||
419 | */ |
||
420 | gboolean |
||
421 | g_rec_mutex_trylock (GRecMutex *rec_mutex) |
||
422 | { |
||
423 | if (pthread_mutex_trylock (g_rec_mutex_get_impl (rec_mutex)) != 0) |
||
424 | return FALSE; |
||
425 | |||
426 | return TRUE; |
||
427 | } |
||
428 | |||
429 | /* {{{1 GRWLock */ |
||
430 | |||
431 | static pthread_rwlock_t * |
||
432 | g_rw_lock_impl_new (void) |
||
433 | { |
||
434 | pthread_rwlock_t *rwlock; |
||
435 | gint status; |
||
436 | |||
437 | rwlock = malloc (sizeof (pthread_rwlock_t)); |
||
438 | if G_UNLIKELY (rwlock == NULL) |
||
439 | g_thread_abort (errno, "malloc"); |
||
440 | |||
441 | if G_UNLIKELY ((status = pthread_rwlock_init (rwlock, NULL)) != 0) |
||
442 | g_thread_abort (status, "pthread_rwlock_init"); |
||
443 | |||
444 | return rwlock; |
||
445 | } |
||
446 | |||
447 | static void |
||
448 | g_rw_lock_impl_free (pthread_rwlock_t *rwlock) |
||
449 | { |
||
450 | pthread_rwlock_destroy (rwlock); |
||
451 | free (rwlock); |
||
452 | } |
||
453 | |||
/* Return the pthread rwlock backing @lock, creating it on first use.
 * Same lock-free first-use pattern as g_mutex_get_impl(): racing
 * threads each allocate a candidate, the compare-and-exchange winner's
 * pointer is kept, losers free theirs. */
static inline pthread_rwlock_t *
g_rw_lock_get_impl (GRWLock *lock)
{
  pthread_rwlock_t *impl = g_atomic_pointer_get (&lock->p);

  if G_UNLIKELY (impl == NULL)
    {
      impl = g_rw_lock_impl_new ();
      if (!g_atomic_pointer_compare_and_exchange (&lock->p, NULL, impl))
        g_rw_lock_impl_free (impl);
      /* Re-read: ours, or the one another thread installed first. */
      impl = lock->p;
    }

  return impl;
}
||
469 | |||
470 | /** |
||
471 | * g_rw_lock_init: |
||
472 | * @rw_lock: an uninitialized #GRWLock |
||
473 | * |
||
474 | * Initializes a #GRWLock so that it can be used. |
||
475 | * |
||
476 | * This function is useful to initialize a lock that has been |
||
477 | * allocated on the stack, or as part of a larger structure. It is not |
||
478 | * necessary to initialise a reader-writer lock that has been statically |
||
479 | * allocated. |
||
480 | * |
||
481 | * |[<!-- language="C" --> |
||
482 | * typedef struct { |
||
483 | * GRWLock l; |
||
484 | * ... |
||
485 | * } Blob; |
||
486 | * |
||
487 | * Blob *b; |
||
488 | * |
||
489 | * b = g_new (Blob, 1); |
||
490 | * g_rw_lock_init (&b->l); |
||
491 | * ]| |
||
492 | * |
||
493 | * To undo the effect of g_rw_lock_init() when a lock is no longer |
||
494 | * needed, use g_rw_lock_clear(). |
||
495 | * |
||
496 | * Calling g_rw_lock_init() on an already initialized #GRWLock leads |
||
497 | * to undefined behaviour. |
||
498 | * |
||
499 | * Since: 2.32 |
||
500 | */ |
||
void
g_rw_lock_init (GRWLock *rw_lock)
{
  /* Eagerly allocate the backing pthread rwlock. */
  rw_lock->p = g_rw_lock_impl_new ();
}
||
506 | |||
507 | /** |
||
508 | * g_rw_lock_clear: |
||
509 | * @rw_lock: an initialized #GRWLock |
||
510 | * |
||
511 | * Frees the resources allocated to a lock with g_rw_lock_init(). |
||
512 | * |
||
513 | * This function should not be used with a #GRWLock that has been |
||
514 | * statically allocated. |
||
515 | * |
||
516 | * Calling g_rw_lock_clear() when any thread holds the lock |
||
517 | * leads to undefined behaviour. |
||
518 | * |
||
519 | * Since: 2.32 |
||
520 | */ |
||
void
g_rw_lock_clear (GRWLock *rw_lock)
{
  /* Destroy and free the backing pthread rwlock.  No thread may be
   * holding the lock. */
  g_rw_lock_impl_free (rw_lock->p);
}
||
526 | |||
527 | /** |
||
528 | * g_rw_lock_writer_lock: |
||
529 | * @rw_lock: a #GRWLock |
||
530 | * |
||
531 | * Obtain a write lock on @rw_lock. If any thread already holds |
||
532 | * a read or write lock on @rw_lock, the current thread will block |
||
533 | * until all other threads have dropped their locks on @rw_lock. |
||
534 | * |
||
535 | * Since: 2.32 |
||
536 | */ |
||
void
g_rw_lock_writer_lock (GRWLock *rw_lock)
{
  /* Blocks until exclusive (write) ownership is acquired.  The return
   * value is deliberately ignored, like the other rwlock wrappers. */
  pthread_rwlock_wrlock (g_rw_lock_get_impl (rw_lock));
}
||
542 | |||
543 | /** |
||
544 | * g_rw_lock_writer_trylock: |
||
545 | * @rw_lock: a #GRWLock |
||
546 | * |
||
547 | * Tries to obtain a write lock on @rw_lock. If any other thread holds |
||
548 | * a read or write lock on @rw_lock, it immediately returns %FALSE. |
||
549 | * Otherwise it locks @rw_lock and returns %TRUE. |
||
550 | * |
||
551 | * Returns: %TRUE if @rw_lock could be locked |
||
552 | * |
||
553 | * Since: 2.32 |
||
554 | */ |
||
555 | gboolean |
||
556 | g_rw_lock_writer_trylock (GRWLock *rw_lock) |
||
557 | { |
||
558 | if (pthread_rwlock_trywrlock (g_rw_lock_get_impl (rw_lock)) != 0) |
||
559 | return FALSE; |
||
560 | |||
561 | return TRUE; |
||
562 | } |
||
563 | |||
564 | /** |
||
565 | * g_rw_lock_writer_unlock: |
||
566 | * @rw_lock: a #GRWLock |
||
567 | * |
||
568 | * Release a write lock on @rw_lock. |
||
569 | * |
||
570 | * Calling g_rw_lock_writer_unlock() on a lock that is not held |
||
571 | * by the current thread leads to undefined behaviour. |
||
572 | * |
||
573 | * Since: 2.32 |
||
574 | */ |
||
void
g_rw_lock_writer_unlock (GRWLock *rw_lock)
{
  /* pthread has a single unlock call for both reader and writer
   * ownership. */
  pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock));
}
||
580 | |||
581 | /** |
||
582 | * g_rw_lock_reader_lock: |
||
583 | * @rw_lock: a #GRWLock |
||
584 | * |
||
585 | * Obtain a read lock on @rw_lock. If another thread currently holds |
||
586 | * the write lock on @rw_lock or blocks waiting for it, the current |
||
587 | * thread will block. Read locks can be taken recursively. |
||
588 | * |
||
589 | * It is implementation-defined how many threads are allowed to |
||
590 | * hold read locks on the same lock simultaneously. |
||
591 | * |
||
592 | * Since: 2.32 |
||
593 | */ |
||
void
g_rw_lock_reader_lock (GRWLock *rw_lock)
{
  /* Blocks until shared (read) ownership is acquired.  The return
   * value is deliberately ignored, like the other rwlock wrappers. */
  pthread_rwlock_rdlock (g_rw_lock_get_impl (rw_lock));
}
||
599 | |||
600 | /** |
||
601 | * g_rw_lock_reader_trylock: |
||
602 | * @rw_lock: a #GRWLock |
||
603 | * |
||
604 | * Tries to obtain a read lock on @rw_lock and returns %TRUE if |
||
605 | * the read lock was successfully obtained. Otherwise it |
||
606 | * returns %FALSE. |
||
607 | * |
||
608 | * Returns: %TRUE if @rw_lock could be locked |
||
609 | * |
||
610 | * Since: 2.32 |
||
611 | */ |
||
612 | gboolean |
||
613 | g_rw_lock_reader_trylock (GRWLock *rw_lock) |
||
614 | { |
||
615 | if (pthread_rwlock_tryrdlock (g_rw_lock_get_impl (rw_lock)) != 0) |
||
616 | return FALSE; |
||
617 | |||
618 | return TRUE; |
||
619 | } |
||
620 | |||
621 | /** |
||
622 | * g_rw_lock_reader_unlock: |
||
623 | * @rw_lock: a #GRWLock |
||
624 | * |
||
625 | * Release a read lock on @rw_lock. |
||
626 | * |
||
627 | * Calling g_rw_lock_reader_unlock() on a lock that is not held |
||
628 | * by the current thread leads to undefined behaviour. |
||
629 | * |
||
630 | * Since: 2.32 |
||
631 | */ |
||
void
g_rw_lock_reader_unlock (GRWLock *rw_lock)
{
  /* pthread has a single unlock call for both reader and writer
   * ownership. */
  pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock));
}
||
637 | |||
638 | /* {{{1 GCond */ |
||
639 | |||
640 | #if !defined(USE_NATIVE_MUTEX) |
||
641 | |||
/* Allocate and initialise the pthread condition variable backing a
 * GCond, configured (where necessary) so that timed waits measure
 * against the monotonic clock.  Aborts the process on any failure. */
static pthread_cond_t *
g_cond_impl_new (void)
{
  pthread_condattr_t attr;
  pthread_cond_t *cond;
  gint status;

  pthread_condattr_init (&attr);

#ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP
  /* Nothing to configure: g_cond_wait_until() converts its absolute
   * monotonic deadline to a relative timeout itself. */
#elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC)
  if G_UNLIKELY ((status = pthread_condattr_setclock (&attr, CLOCK_MONOTONIC)) != 0)
    g_thread_abort (status, "pthread_condattr_setclock");
#else
#error Cannot support GCond on your platform.
#endif

  cond = malloc (sizeof (pthread_cond_t));
  if G_UNLIKELY (cond == NULL)
    g_thread_abort (errno, "malloc");

  if G_UNLIKELY ((status = pthread_cond_init (cond, &attr)) != 0)
    g_thread_abort (status, "pthread_cond_init");

  pthread_condattr_destroy (&attr);

  return cond;
}
||
670 | |||
/* Tear down and release a pthread condition variable obtained from
 * g_cond_impl_new(). */
static void
g_cond_impl_free (pthread_cond_t *impl)
{
  pthread_cond_destroy (impl);
  free (impl);
}
||
677 | |||
/* Return the pthread condition variable backing @cond, creating it on
 * first use.  Same lock-free first-use pattern as g_mutex_get_impl():
 * racing threads each allocate a candidate, the compare-and-exchange
 * winner's pointer is kept, losers free theirs. */
static inline pthread_cond_t *
g_cond_get_impl (GCond *cond)
{
  pthread_cond_t *impl = g_atomic_pointer_get (&cond->p);

  if G_UNLIKELY (impl == NULL)
    {
      impl = g_cond_impl_new ();
      if (!g_atomic_pointer_compare_and_exchange (&cond->p, NULL, impl))
        g_cond_impl_free (impl);
      /* Re-read: ours, or the one another thread installed first. */
      impl = cond->p;
    }

  return impl;
}
||
693 | |||
694 | /** |
||
695 | * g_cond_init: |
||
696 | * @cond: an uninitialized #GCond |
||
697 | * |
||
698 | * Initialises a #GCond so that it can be used. |
||
699 | * |
||
700 | * This function is useful to initialise a #GCond that has been |
||
701 | * allocated as part of a larger structure. It is not necessary to |
||
702 | * initialise a #GCond that has been statically allocated. |
||
703 | * |
||
704 | * To undo the effect of g_cond_init() when a #GCond is no longer |
||
705 | * needed, use g_cond_clear(). |
||
706 | * |
||
707 | * Calling g_cond_init() on an already-initialised #GCond leads |
||
708 | * to undefined behaviour. |
||
709 | * |
||
710 | * Since: 2.32 |
||
711 | */ |
||
void
g_cond_init (GCond *cond)
{
  /* Eagerly allocate the backing pthread condition variable. */
  cond->p = g_cond_impl_new ();
}
||
717 | |||
718 | /** |
||
719 | * g_cond_clear: |
||
720 | * @cond: an initialised #GCond |
||
721 | * |
||
722 | * Frees the resources allocated to a #GCond with g_cond_init(). |
||
723 | * |
||
724 | * This function should not be used with a #GCond that has been |
||
725 | * statically allocated. |
||
726 | * |
||
727 | * Calling g_cond_clear() for a #GCond on which threads are |
||
728 | * blocking leads to undefined behaviour. |
||
729 | * |
||
730 | * Since: 2.32 |
||
731 | */ |
||
void
g_cond_clear (GCond *cond)
{
  /* Destroy and free the backing pthread condition variable.  No
   * thread may be waiting on it. */
  g_cond_impl_free (cond->p);
}
||
737 | |||
738 | /** |
||
739 | * g_cond_wait: |
||
740 | * @cond: a #GCond |
||
741 | * @mutex: a #GMutex that is currently locked |
||
742 | * |
||
743 | * Atomically releases @mutex and waits until @cond is signalled. |
||
744 | * When this function returns, @mutex is locked again and owned by the |
||
745 | * calling thread. |
||
746 | * |
||
747 | * When using condition variables, it is possible that a spurious wakeup |
||
748 | * may occur (ie: g_cond_wait() returns even though g_cond_signal() was |
||
749 | * not called). It's also possible that a stolen wakeup may occur. |
||
750 | * This is when g_cond_signal() is called, but another thread acquires |
||
751 | * @mutex before this thread and modifies the state of the program in |
||
752 | * such a way that when g_cond_wait() is able to return, the expected |
||
753 | * condition is no longer met. |
||
754 | * |
||
755 | * For this reason, g_cond_wait() must always be used in a loop. See |
||
756 | * the documentation for #GCond for a complete example. |
||
757 | **/ |
||
758 | void |
||
759 | g_cond_wait (GCond *cond, |
||
760 | GMutex *mutex) |
||
761 | { |
||
762 | gint status; |
||
763 | |||
764 | if G_UNLIKELY ((status = pthread_cond_wait (g_cond_get_impl (cond), g_mutex_get_impl (mutex))) != 0) |
||
765 | g_thread_abort (status, "pthread_cond_wait"); |
||
766 | } |
||
767 | |||
768 | /** |
||
769 | * g_cond_signal: |
||
770 | * @cond: a #GCond |
||
771 | * |
||
772 | * If threads are waiting for @cond, at least one of them is unblocked. |
||
773 | * If no threads are waiting for @cond, this function has no effect. |
||
774 | * It is good practice to hold the same lock as the waiting thread |
||
775 | * while calling this function, though not required. |
||
776 | */ |
||
777 | void |
||
778 | g_cond_signal (GCond *cond) |
||
779 | { |
||
780 | gint status; |
||
781 | |||
782 | if G_UNLIKELY ((status = pthread_cond_signal (g_cond_get_impl (cond))) != 0) |
||
783 | g_thread_abort (status, "pthread_cond_signal"); |
||
784 | } |
||
785 | |||
786 | /** |
||
787 | * g_cond_broadcast: |
||
788 | * @cond: a #GCond |
||
789 | * |
||
790 | * If threads are waiting for @cond, all of them are unblocked. |
||
791 | * If no threads are waiting for @cond, this function has no effect. |
||
792 | * It is good practice to lock the same mutex as the waiting threads |
||
793 | * while calling this function, though not required. |
||
794 | */ |
||
795 | void |
||
796 | g_cond_broadcast (GCond *cond) |
||
797 | { |
||
798 | gint status; |
||
799 | |||
800 | if G_UNLIKELY ((status = pthread_cond_broadcast (g_cond_get_impl (cond))) != 0) |
||
801 | g_thread_abort (status, "pthread_cond_broadcast"); |
||
802 | } |
||
803 | |||
804 | /** |
||
805 | * g_cond_wait_until: |
||
806 | * @cond: a #GCond |
||
807 | * @mutex: a #GMutex that is currently locked |
||
808 | * @end_time: the monotonic time to wait until |
||
809 | * |
||
810 | * Waits until either @cond is signalled or @end_time has passed. |
||
811 | * |
||
812 | * As with g_cond_wait() it is possible that a spurious or stolen wakeup |
||
813 | * could occur. For that reason, waiting on a condition variable should |
||
814 | * always be in a loop, based on an explicitly-checked predicate. |
||
815 | * |
||
816 | * %TRUE is returned if the condition variable was signalled (or in the |
||
817 | * case of a spurious wakeup). %FALSE is returned if @end_time has |
||
818 | * passed. |
||
819 | * |
||
820 | * The following code shows how to correctly perform a timed wait on a |
||
821 | * condition variable (extending the example presented in the |
||
822 | * documentation for #GCond): |
||
823 | * |
||
824 | * |[<!-- language="C" --> |
||
825 | * gpointer |
||
826 | * pop_data_timed (void) |
||
827 | * { |
||
828 | * gint64 end_time; |
||
829 | * gpointer data; |
||
830 | * |
||
831 | * g_mutex_lock (&data_mutex); |
||
832 | * |
||
833 | * end_time = g_get_monotonic_time () + 5 * G_TIME_SPAN_SECOND; |
||
834 | * while (!current_data) |
||
835 | * if (!g_cond_wait_until (&data_cond, &data_mutex, end_time)) |
||
836 | * { |
||
837 | * // timeout has passed. |
||
838 | * g_mutex_unlock (&data_mutex); |
||
839 | * return NULL; |
||
840 | * } |
||
841 | * |
||
842 | * // there is data for us |
||
843 | * data = current_data; |
||
844 | * current_data = NULL; |
||
845 | * |
||
846 | * g_mutex_unlock (&data_mutex); |
||
847 | * |
||
848 | * return data; |
||
849 | * } |
||
850 | * ]| |
||
851 | * |
||
852 | * Notice that the end time is calculated once, before entering the |
||
853 | * loop and reused. This is the motivation behind the use of absolute |
||
854 | * time on this API -- if a relative time of 5 seconds were passed |
||
855 | * directly to the call and a spurious wakeup occurred, the program would |
||
856 | * have to start over waiting again (which would lead to a total wait |
||
857 | * time of more than 5 seconds). |
||
858 | * |
||
859 | * Returns: %TRUE on a signal, %FALSE on a timeout |
||
860 | * Since: 2.32 |
||
861 | **/ |
||
/* @end_time is an absolute deadline on the monotonic clock, in
 * microseconds, as returned by g_get_monotonic_time(). */
gboolean
g_cond_wait_until (GCond  *cond,
                   GMutex *mutex,
                   gint64  end_time)
{
  struct timespec ts;
  gint status;

#ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP
  /* end_time is given relative to the monotonic clock as returned by
   * g_get_monotonic_time().
   *
   * Since this pthreads wants the relative time, convert it back again.
   */
  {
    gint64 now = g_get_monotonic_time ();
    gint64 relative;

    /* Deadline already passed: report timeout without waiting. */
    if (end_time <= now)
      return FALSE;

    relative = end_time - now;

    /* Split microseconds into seconds + nanoseconds. */
    ts.tv_sec = relative / 1000000;
    ts.tv_nsec = (relative % 1000000) * 1000;

    if ((status = pthread_cond_timedwait_relative_np (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0)
      return TRUE;
  }
#elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC)
  /* This is the exact check we used during init to set the clock to
   * monotonic, so if we're in this branch, timedwait() will already be
   * expecting a monotonic clock.
   */
  {
    /* Absolute monotonic deadline: split microseconds into
     * seconds + nanoseconds. */
    ts.tv_sec = end_time / 1000000;
    ts.tv_nsec = (end_time % 1000000) * 1000;

    if ((status = pthread_cond_timedwait (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0)
      return TRUE;
  }
#else
#error Cannot support GCond on your platform.
#endif

  /* Any error other than a timeout is a fatal programming error. */
  if G_UNLIKELY (status != ETIMEDOUT)
    g_thread_abort (status, "pthread_cond_timedwait");

  return FALSE;
}
||
912 | |||
913 | #endif /* defined(USE_NATIVE_MUTEX) */ |
||
914 | |||
915 | /* {{{1 GPrivate */ |
||
916 | |||
917 | /** |
||
918 | * GPrivate: |
||
919 | * |
||
920 | * The #GPrivate struct is an opaque data structure to represent a |
||
921 | * thread-local data key. It is approximately equivalent to the |
||
922 | * pthread_setspecific()/pthread_getspecific() APIs on POSIX and to |
||
923 | * TlsSetValue()/TlsGetValue() on Windows. |
||
924 | * |
||
925 | * If you don't already know why you might want this functionality, |
||
926 | * then you probably don't need it. |
||
927 | * |
||
928 | * #GPrivate is a very limited resource (as far as 128 per program, |
||
929 | * shared between all libraries). It is also not possible to destroy a |
||
930 | * #GPrivate after it has been used. As such, it is only ever acceptable |
||
931 | * to use #GPrivate in static scope, and even then sparingly so. |
||
932 | * |
||
933 | * See G_PRIVATE_INIT() for a couple of examples. |
||
934 | * |
||
935 | * The #GPrivate structure should be considered opaque. It should only |
||
936 | * be accessed via the g_private_ functions. |
||
937 | */ |
||
938 | |||
939 | /** |
||
940 | * G_PRIVATE_INIT: |
||
941 | * @notify: a #GDestroyNotify |
||
942 | * |
||
943 | * A macro to assist with the static initialisation of a #GPrivate. |
||
944 | * |
||
945 | * This macro is useful for the case that a #GDestroyNotify function |
||
946 | * should be associated the key. This is needed when the key will be |
||
947 | * used to point at memory that should be deallocated when the thread |
||
948 | * exits. |
||
949 | * |
||
950 | * Additionally, the #GDestroyNotify will also be called on the previous |
||
951 | * value stored in the key when g_private_replace() is used. |
||
952 | * |
||
953 | * If no #GDestroyNotify is needed, then use of this macro is not |
||
954 | * required -- if the #GPrivate is declared in static scope then it will |
||
955 | * be properly initialised by default (ie: to all zeros). See the |
||
956 | * examples below. |
||
957 | * |
||
958 | * |[<!-- language="C" --> |
||
959 | * static GPrivate name_key = G_PRIVATE_INIT (g_free); |
||
960 | * |
||
961 | * // return value should not be freed |
||
962 | * const gchar * |
||
963 | * get_local_name (void) |
||
964 | * { |
||
965 | * return g_private_get (&name_key); |
||
966 | * } |
||
967 | * |
||
968 | * void |
||
969 | * set_local_name (const gchar *name) |
||
970 | * { |
||
971 | * g_private_replace (&name_key, g_strdup (name)); |
||
972 | * } |
||
973 | * |
||
974 | * |
||
975 | * static GPrivate count_key; // no free function |
||
976 | * |
||
977 | * gint |
||
978 | * get_local_count (void) |
||
979 | * { |
||
980 | * return GPOINTER_TO_INT (g_private_get (&count_key)); |
||
981 | * } |
||
982 | * |
||
983 | * void |
||
984 | * set_local_count (gint count) |
||
985 | * { |
||
986 | * g_private_set (&count_key, GINT_TO_POINTER (count)); |
||
987 | * } |
||
988 | * ]| |
||
989 | * |
||
990 | * Since: 2.32 |
||
991 | **/ |
||
992 | |||
/* Allocate and initialise the pthread_key_t backing a GPrivate.
 *
 * Deliberately uses raw malloc() rather than GLib allocators: this is
 * bottom-of-the-stack code that must not recurse into the rest of GLib
 * (see the note at the top of this file).  Aborts the process on
 * allocation or pthread_key_create() failure -- there is no way to
 * report an error to the caller here.
 */
static pthread_key_t *
g_private_impl_new (GDestroyNotify notify)
{
  pthread_key_t *key;
  gint status;

  key = malloc (sizeof (pthread_key_t));
  if G_UNLIKELY (key == NULL)
    g_thread_abort (errno, "malloc");
  /* @notify becomes the per-thread destructor run at thread exit */
  status = pthread_key_create (key, notify);
  if G_UNLIKELY (status != 0)
    g_thread_abort (status, "pthread_key_create");

  return key;
}
||
1008 | |||
/* Destroy a pthread key created by g_private_impl_new() and release its
 * storage.  Only called on the loser of the one-time initialisation race
 * in g_private_get_impl(); GPrivate keys are otherwise never destroyed.
 */
static void
g_private_impl_free (pthread_key_t *key)
{
  gint status;

  status = pthread_key_delete (*key);
  if G_UNLIKELY (status != 0)
    g_thread_abort (status, "pthread_key_delete");
  /* matches the malloc() in g_private_impl_new() */
  free (key);
}
||
1019 | |||
1020 | static inline pthread_key_t * |
||
1021 | g_private_get_impl (GPrivate *key) |
||
1022 | { |
||
1023 | pthread_key_t *impl = g_atomic_pointer_get (&key->p); |
||
1024 | |||
1025 | if G_UNLIKELY (impl == NULL) |
||
1026 | { |
||
1027 | impl = g_private_impl_new (key->notify); |
||
1028 | if (!g_atomic_pointer_compare_and_exchange (&key->p, NULL, impl)) |
||
1029 | { |
||
1030 | g_private_impl_free (impl); |
||
1031 | impl = key->p; |
||
1032 | } |
||
1033 | } |
||
1034 | |||
1035 | return impl; |
||
1036 | } |
||
1037 | |||
1038 | /** |
||
1039 | * g_private_get: |
||
1040 | * @key: a #GPrivate |
||
1041 | * |
||
1042 | * Returns the current value of the thread local variable @key. |
||
1043 | * |
||
1044 | * If the value has not yet been set in this thread, %NULL is returned. |
||
1045 | * Values are never copied between threads (when a new thread is |
||
1046 | * created, for example). |
||
1047 | * |
||
1048 | * Returns: the thread-local value |
||
1049 | */ |
||
1050 | gpointer |
||
1051 | g_private_get (GPrivate *key) |
||
1052 | { |
||
1053 | /* quote POSIX: No errors are returned from pthread_getspecific(). */ |
||
1054 | return pthread_getspecific (*g_private_get_impl (key)); |
||
1055 | } |
||
1056 | |||
1057 | /** |
||
1058 | * g_private_set: |
||
1059 | * @key: a #GPrivate |
||
1060 | * @value: the new value |
||
1061 | * |
||
1062 | * Sets the thread local variable @key to have the value @value in the |
||
1063 | * current thread. |
||
1064 | * |
||
1065 | * This function differs from g_private_replace() in the following way: |
||
1066 | * the #GDestroyNotify for @key is not called on the old value. |
||
1067 | */ |
||
1068 | void |
||
1069 | g_private_set (GPrivate *key, |
||
1070 | gpointer value) |
||
1071 | { |
||
1072 | gint status; |
||
1073 | |||
1074 | if G_UNLIKELY ((status = pthread_setspecific (*g_private_get_impl (key), value)) != 0) |
||
1075 | g_thread_abort (status, "pthread_setspecific"); |
||
1076 | } |
||
1077 | |||
1078 | /** |
||
1079 | * g_private_replace: |
||
1080 | * @key: a #GPrivate |
||
1081 | * @value: the new value |
||
1082 | * |
||
1083 | * Sets the thread local variable @key to have the value @value in the |
||
1084 | * current thread. |
||
1085 | * |
||
1086 | * This function differs from g_private_set() in the following way: if |
||
1087 | * the previous value was non-%NULL then the #GDestroyNotify handler for |
||
1088 | * @key is run on it. |
||
1089 | * |
||
1090 | * Since: 2.32 |
||
1091 | **/ |
||
1092 | void |
||
1093 | g_private_replace (GPrivate *key, |
||
1094 | gpointer value) |
||
1095 | { |
||
1096 | pthread_key_t *impl = g_private_get_impl (key); |
||
1097 | gpointer old; |
||
1098 | gint status; |
||
1099 | |||
1100 | old = pthread_getspecific (*impl); |
||
1101 | if (old && key->notify) |
||
1102 | key->notify (old); |
||
1103 | |||
1104 | if G_UNLIKELY ((status = pthread_setspecific (*impl, value)) != 0) |
||
1105 | g_thread_abort (status, "pthread_setspecific"); |
||
1106 | } |
||
1107 | |||
1108 | /* {{{1 GThread */ |
||
1109 | |||
/* Abort via g_error() with file/line context if a pthreads call
 * returned a non-zero error code.  (err) is evaluated exactly once;
 * @name is the human-readable name of the failing call. */
#define posix_check_err(err, name) G_STMT_START{			\
  int error = (err); 							\
  if (error)	 		 		 			\
    g_error ("file %s: line %d (%s): error '%s' during '%s'",		\
           __FILE__, __LINE__, G_STRFUNC,				\
           g_strerror (error), name);					\
  }G_STMT_END

/* Run @cmd and abort (with the command text as the name) if it fails. */
#define posix_check_cmd(cmd) posix_check_err (cmd, #cmd)
1119 | |||
/* POSIX-specific extension of GRealThread.  The GRealThread member must
 * come first so a GThreadPosix* can be cast to/from GRealThread*. */
typedef struct
{
  GRealThread thread;

  pthread_t system_thread;  /* underlying pthread handle */
  gboolean joined;          /* TRUE once pthread_join() has completed */
  GMutex lock;              /* serialises join vs. free (see below) */
} GThreadPosix;
||
1128 | |||
/* Release the system-level resources of @thread.
 *
 * A pthread must be either joined or detached exactly once; if nobody
 * joined it (via g_system_thread_wait()), detach it here so the kernel
 * can reclaim it when it exits.
 */
void
g_system_thread_free (GRealThread *thread)
{
  GThreadPosix *pt = (GThreadPosix *) thread;

  if (!pt->joined)
    pthread_detach (pt->system_thread);

  g_mutex_clear (&pt->lock);

  g_slice_free (GThreadPosix, pt);
}
||
1141 | |||
/* Create a new system thread running @thread_func.
 *
 * @stack_size: requested stack size, or 0 for the platform default.
 * @error: set (with G_THREAD_ERROR_AGAIN) only for the recoverable
 *   EAGAIN case; any other pthread_create() failure aborts the process.
 *
 * Returns: the new GRealThread, or NULL on EAGAIN (resource shortage).
 */
GRealThread *
g_system_thread_new (GThreadFunc   thread_func,
                     gulong        stack_size,
                     GError      **error)
{
  GThreadPosix *thread;
  pthread_attr_t attr;
  gint ret;

  thread = g_slice_new0 (GThreadPosix);

  posix_check_cmd (pthread_attr_init (&attr));

#ifdef HAVE_PTHREAD_ATTR_SETSTACKSIZE
  if (stack_size)
    {
#ifdef _SC_THREAD_STACK_MIN
      /* Clamp the request up to the system minimum, otherwise
       * pthread_attr_setstacksize() could reject it outright. */
      long min_stack_size = sysconf (_SC_THREAD_STACK_MIN);
      if (min_stack_size >= 0)
        stack_size = MAX (min_stack_size, stack_size);
#endif /* _SC_THREAD_STACK_MIN */
      /* No error check here, because some systems can't do it and
       * we simply don't want threads to fail because of that. */
      pthread_attr_setstacksize (&attr, stack_size);
    }
#endif /* HAVE_PTHREAD_ATTR_SETSTACKSIZE */

  ret = pthread_create (&thread->system_thread, &attr, (void* (*)(void*))thread_func, thread);

  /* Destroy the attr before inspecting ret so it is released on
   * every path, including the EAGAIN return below. */
  posix_check_cmd (pthread_attr_destroy (&attr));

  if (ret == EAGAIN)
    {
      /* Out of thread resources: the one failure callers can handle. */
      g_set_error (error, G_THREAD_ERROR, G_THREAD_ERROR_AGAIN,
                   "Error creating thread: %s", g_strerror (ret));
      g_slice_free (GThreadPosix, thread);
      return NULL;
    }

  /* Any other error is a programming/system failure: abort. */
  posix_check_err (ret, "pthread_create");

  g_mutex_init (&thread->lock);

  return (GRealThread *) thread;
}
||
1187 | |||
1188 | /** |
||
1189 | * g_thread_yield: |
||
1190 | * |
||
1191 | * Causes the calling thread to voluntarily relinquish the CPU, so |
||
1192 | * that other threads can run. |
||
1193 | * |
||
1194 | * This function is often used as a method to make busy wait less evil. |
||
1195 | */ |
||
void
g_thread_yield (void)
{
  /* Just a scheduler hint; the return value carries no useful
   * information, so it is ignored. */
  sched_yield ();
}
||
1201 | |||
/* Wait for @thread to terminate (i.e. join it).
 *
 * pt->lock makes this idempotent and safe against concurrent waiters:
 * pthread_join() on an already-joined thread is undefined behaviour, so
 * only the first caller actually joins and everyone else just observes
 * pt->joined under the lock.
 */
void
g_system_thread_wait (GRealThread *thread)
{
  GThreadPosix *pt = (GThreadPosix *) thread;

  g_mutex_lock (&pt->lock);

  if (!pt->joined)
    {
      posix_check_cmd (pthread_join (pt->system_thread, NULL));
      pt->joined = TRUE;
    }

  g_mutex_unlock (&pt->lock);
}
||
1217 | |||
/* Terminate the calling thread.  Does not return. */
void
g_system_thread_exit (void)
{
  pthread_exit (NULL);
}
||
1223 | |||
/* Best-effort: set the OS-visible name of the calling thread.
 *
 * Errors are deliberately ignored -- naming is purely cosmetic.
 * NOTE(review): on Linux, thread names are limited to 16 bytes
 * including the NUL; longer names presumably fail silently -- confirm
 * if callers rely on long names.
 */
void
g_system_thread_set_name (const gchar *name)
{
#if defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID)
  pthread_setname_np (pthread_self(), name); /* on Linux and Solaris */
#elif defined(HAVE_PTHREAD_SETNAME_NP_WITHOUT_TID)
  pthread_setname_np (name); /* on OS X and iOS */
#endif
}
||
1233 | |||
1234 | /* {{{1 GMutex and GCond futex implementation */ |
||
1235 | |||
1236 | #if defined(USE_NATIVE_MUTEX) |
||
1237 | |||
1238 | #include <linux/futex.h> |
||
1239 | #include <sys/syscall.h> |
||
1240 | |||
1241 | #ifndef FUTEX_WAIT_PRIVATE |
||
1242 | #define FUTEX_WAIT_PRIVATE FUTEX_WAIT |
||
1243 | #define FUTEX_WAKE_PRIVATE FUTEX_WAKE |
||
1244 | #endif |
||
1245 | |||
1246 | /* We should expand the set of operations available in gatomic once we |
||
1247 | * have better C11 support in GCC in common distributions (ie: 4.9). |
||
1248 | * |
||
1249 | * Before then, let's define a couple of useful things for our own |
||
1250 | * purposes... |
||
1251 | */ |
||
1252 | |||
/* Thin wrappers over the GCC __atomic builtins, fixed at 4-byte width
 * to match the guint words inside GMutex/GCond used as futexes below.
 * Acquire ordering on the lock/sample side, release on the unlock side. */
#define exchange_acquire(ptr, new) \
  __atomic_exchange_4((ptr), (new), __ATOMIC_ACQUIRE)
#define compare_exchange_acquire(ptr, old, new) \
  __atomic_compare_exchange_4((ptr), (old), (new), 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)

#define exchange_release(ptr, new) \
  __atomic_exchange_4((ptr), (new), __ATOMIC_RELEASE)
#define store_release(ptr, new) \
  __atomic_store_4((ptr), (new), __ATOMIC_RELEASE)
||
1262 | |||
1263 | /* Our strategy for the mutex is pretty simple: |
||
1264 | * |
||
1265 | * 0: not in use |
||
1266 | * |
||
1267 | * 1: acquired by one thread only, no contention |
||
1268 | * |
||
1269 | * > 1: contended |
||
1270 | * |
||
1271 | * |
||
1272 | * As such, attempting to acquire the lock should involve an increment. |
||
1273 | * If we find that the previous value was 0 then we can return |
||
1274 | * immediately. |
||
1275 | * |
||
1276 | * On unlock, we always store 0 to indicate that the lock is available. |
||
1277 | * If the value there was 1 before then we didn't have contention and |
||
1278 | * can return immediately. If the value was something other than 1 then |
||
1279 | * we have the contended case and need to wake a waiter. |
||
1280 | * |
||
1281 | * If it was not 0 then there is another thread holding it and we must |
||
1282 | * wait. We must always ensure that we mark a value >1 while we are |
||
1283 | * waiting in order to instruct the holder to do a wake operation on |
||
1284 | * unlock. |
||
1285 | */ |
||
1286 | |||
void
g_mutex_init (GMutex *mutex)
{
  /* 0 == unlocked (see the state diagram in the comment above). */
  mutex->i[0] = 0;
}
||
1292 | |||
/* Nothing to release for a futex-based mutex; this only sanity-checks
 * that the mutex is not still locked (or was never initialised). */
void
g_mutex_clear (GMutex *mutex)
{
  if G_UNLIKELY (mutex->i[0] != 0)
    {
      fprintf (stderr, "g_mutex_clear() called on uninitialised or locked mutex\n");
      abort ();
    }
}
||
1302 | |||
/* Contended-lock path, kept out of line so the fast path in
 * g_mutex_lock() stays small.
 *
 * Each iteration marks the mutex contended (2) and, if it was not free,
 * sleeps on the futex until the word changes from 2.  Exchanging 2
 * (rather than incrementing) is what guarantees the eventual unlocker
 * sees a value > 1 and performs a wake.
 */
static void __attribute__((noinline))
g_mutex_lock_slowpath (GMutex *mutex)
{
  /* Set to 2 to indicate contention.  If it was zero before then we
   * just acquired the lock.
   *
   * Otherwise, sleep for as long as the 2 remains...
   */
  while (exchange_acquire (&mutex->i[0], 2) != 0)
    syscall (__NR_futex, &mutex->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) 2, NULL);
}
||
1314 | |||
/* Contended-unlock path: called after g_mutex_unlock() has already
 * stored 0, when the previous value indicated either contention (>1,
 * so a waiter must be woken) or a bug (0, unlock of an unlocked mutex).
 */
static void __attribute__((noinline))
g_mutex_unlock_slowpath (GMutex *mutex,
                         guint   prev)
{
  /* We seem to get better code for the uncontended case by splitting
   * this out...
   */
  if G_UNLIKELY (prev == 0)
    {
      fprintf (stderr, "Attempt to unlock mutex that was not locked\n");
      abort ();
    }

  /* Wake exactly one waiter; it will re-mark the mutex contended. */
  syscall (__NR_futex, &mutex->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL);
}
||
1330 | |||
/* Acquire @mutex, sleeping on the futex if it is already held. */
void
g_mutex_lock (GMutex *mutex)
{
  /* 0 -> 1 and we're done.  Anything else, and we need to wait... */
  if G_UNLIKELY (g_atomic_int_add (&mutex->i[0], 1) != 0)
    g_mutex_lock_slowpath (mutex);
}
||
1338 | |||
/* Release @mutex, waking a waiter if there was contention. */
void
g_mutex_unlock (GMutex *mutex)
{
  guint prev;

  /* Release store of 0 publishes the critical section's writes. */
  prev = exchange_release (&mutex->i[0], 0);

  /* 1-> 0 and we're done.  Anything else and we need to signal... */
  if G_UNLIKELY (prev != 1)
    g_mutex_unlock_slowpath (mutex, prev);
}
||
1350 | |||
/* Try to acquire @mutex without blocking.
 *
 * Returns: TRUE if the lock was taken (0 -> 1), FALSE otherwise.
 */
gboolean
g_mutex_trylock (GMutex *mutex)
{
  guint zero = 0;

  /* We don't want to touch the value at all unless we can move it from
   * exactly 0 to 1.
   */
  return compare_exchange_acquire (&mutex->i[0], &zero, 1);
}
||
1361 | |||
1362 | /* Condition variables are implemented in a rather simple way as well. |
||
1363 | * In many ways, futex() as an abstraction is even more ideally suited |
||
1364 | * to condition variables than it is to mutexes. |
||
1365 | * |
||
1366 | * We store a generation counter. We sample it with the lock held and |
||
1367 | * unlock before sleeping on the futex. |
||
1368 | * |
||
1369 | * Signalling simply involves increasing the counter and making the |
||
1370 | * appropriate futex call. |
||
1371 | * |
||
1372 | * The only thing that is the slightest bit complicated is timed waits |
||
1373 | * because we must convert our absolute time to relative. |
||
1374 | */ |
||
1375 | |||
void
g_cond_init (GCond *cond)
{
  /* i[0] is the generation counter described in the comment above. */
  cond->i[0] = 0;
}
||
1381 | |||
/* A futex-based GCond holds no resources, so there is nothing to free. */
void
g_cond_clear (GCond *cond)
{
}
||
1386 | |||
/* Atomically release @mutex and wait on @cond; re-acquires @mutex
 * before returning.
 *
 * Sampling the generation counter *before* unlocking closes the lost-
 * wakeup window: a signal between the unlock and the futex call bumps
 * the counter, so FUTEX_WAIT returns immediately (EAGAIN) instead of
 * sleeping.  Spurious wakeups are permitted, as for any condvar.
 */
void
g_cond_wait (GCond  *cond,
             GMutex *mutex)
{
  guint sampled = g_atomic_int_get (&cond->i[0]);

  g_mutex_unlock (mutex);
  syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, NULL);
  g_mutex_lock (mutex);
}
||
1397 | |||
/* Wake at most one waiter: bump the generation counter (so concurrent
 * waiters about to sleep will notice) then wake one futex sleeper. */
void
g_cond_signal (GCond *cond)
{
  g_atomic_int_inc (&cond->i[0]);

  syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL);
}
||
1405 | |||
/* Wake all waiters: bump the generation counter, then wake up to
 * INT_MAX futex sleepers (i.e. everyone). */
void
g_cond_broadcast (GCond *cond)
{
  g_atomic_int_inc (&cond->i[0]);

  syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) INT_MAX, NULL);
}
||
1413 | |||
1414 | gboolean |
||
1415 | g_cond_wait_until (GCond *cond, |
||
1416 | GMutex *mutex, |
||
1417 | gint64 end_time) |
||
1418 | { |
||
1419 | struct timespec now; |
||
1420 | struct timespec span; |
||
1421 | guint sampled; |
||
1422 | int res; |
||
1423 | |||
1424 | if (end_time < 0) |
||
1425 | return FALSE; |
||
1426 | |||
1427 | clock_gettime (CLOCK_MONOTONIC, &now); |
||
1428 | span.tv_sec = (end_time / 1000000) - now.tv_sec; |
||
1429 | span.tv_nsec = ((end_time % 1000000) * 1000) - now.tv_nsec; |
||
1430 | if (span.tv_nsec < 0) |
||
1431 | { |
||
1432 | span.tv_nsec += 1000000000; |
||
1433 | span.tv_sec--; |
||
1434 | } |
||
1435 | |||
1436 | if (span.tv_sec < 0) |
||
1437 | return FALSE; |
||
1438 | |||
1439 | sampled = cond->i[0]; |
||
1440 | g_mutex_unlock (mutex); |
||
1441 | res = syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, &span); |
||
1442 | g_mutex_lock (mutex); |
||
1443 | |||
1444 | return (res < 0 && errno == ETIMEDOUT) ? FALSE : TRUE; |
||
1445 | } |
||
1446 | |||
1447 | #endif |
||
1448 | |||
1449 | /* {{{1 Epilogue */ |
||
1450 | /* vim:set foldmethod=marker: */ |