author    Mike Frysinger <vapier@gentoo.org> 2006-01-06 00:56:35 +0000
committer Mike Frysinger <vapier@gentoo.org> 2006-01-06 00:56:35 +0000
commit    37cafa773b0bbebc9aa820f92ef5949a71d2dfe4 (patch)
tree      5c415fc421104b42e74aea13abe10db0ffbfdef5 /libpthread/linuxthreads.old/sysdeps/pthread/bits/libc-lock.h
parent    ea35ab7829ae1aaae2b24408a7281ee94ce90478 (diff)
update libc lock functions to new pthread forwarding
Diffstat (limited to 'libpthread/linuxthreads.old/sysdeps/pthread/bits/libc-lock.h')
-rw-r--r--  libpthread/linuxthreads.old/sysdeps/pthread/bits/libc-lock.h | 112
1 file changed, 88 insertions(+), 24 deletions(-)
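
The commit replaces the old weak-symbol checks (e.g. "__pthread_mutex_lock != NULL ? ... : 0") with the __libc_maybe_call/__libc_maybe_call2 helpers. In a shared libc build, __libc_maybe_call2 forwards through the __libc_pthread_functions table that libpthread fills in at load time, so libc never has to reference the pthread symbols directly. A minimal illustrative sketch of that path follows; the struct layout below is paraphrased from the ptr_##FUNC token pasting in the macro and is not copied from the tree, so treat the type name and member list as assumptions:

/* Hypothetical shape of the forwarding table; real definition lives in
   linuxthreads internals.  Member names mirror the ptr_##FUNC pasting
   used by __libc_maybe_call2.  */
struct libc_pthread_functions_sketch
{
  int (*ptr_pthread_mutex_init) (pthread_mutex_t *, const pthread_mutexattr_t *);
  int (*ptr_pthread_mutex_destroy) (pthread_mutex_t *);
  int (*ptr_pthread_mutex_lock) (pthread_mutex_t *);
  int (*ptr_pthread_mutex_trylock) (pthread_mutex_t *);
  int (*ptr_pthread_mutex_unlock) (pthread_mutex_t *);
};

extern struct libc_pthread_functions_sketch __libc_pthread_functions;

/* Roughly what __libc_lock_lock (lock) expands to in shared libc:
   if libpthread has not been loaded the pointer is still NULL and the
   lock operation degrades to a no-op returning 0.  */
static int example_lock (pthread_mutex_t *lock)
{
  return __builtin_expect (__libc_pthread_functions.ptr_pthread_mutex_lock != NULL, 0)
         ? __libc_pthread_functions.ptr_pthread_mutex_lock (lock)
         : 0;
}

Outside shared libc (and inside libpthread itself), __libc_maybe_call falls back to the weak-symbol test, so statically linked single-threaded programs keep paying nothing for locking.
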
diff --git a/libpthread/linuxthreads.old/sysdeps/pthread/bits/libc-lock.h b/libpthread/linuxthreads.old/sysdeps/pthread/bits/libc-lock.h
index e2b267d32..c30d00964 100644
--- a/libpthread/linuxthreads.old/sysdeps/pthread/bits/libc-lock.h
+++ b/libpthread/linuxthreads.old/sysdeps/pthread/bits/libc-lock.h
@@ -23,6 +23,10 @@
#include <pthread.h>
+#if defined _LIBC && !defined NOT_IN_libc
+#include <linuxthreads/internals.h>
+#endif
+
/* Mutex type. */
#if defined(_LIBC) || defined(_IO_MTSAFE_IO)
typedef pthread_mutex_t __libc_lock_t;
@@ -32,6 +36,7 @@ typedef pthread_rwlock_t __libc_rwlock_t;
# else
typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
# endif
+typedef __libc_lock_recursive_t __rtld_lock_recursive_t;
#else
typedef struct __libc_lock_opaque__ __libc_lock_t;
typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
@@ -54,6 +59,8 @@ typedef pthread_key_t __libc_key_t;
CLASS __libc_rwlock_t NAME;
#define __libc_lock_define_recursive(CLASS,NAME) \
CLASS __libc_lock_recursive_t NAME;
+#define __rtld_lock_define_recursive(CLASS,NAME) \
+ CLASS __rtld_lock_recursive_t NAME;
/* Define an initialized lock variable NAME with storage class CLASS.
@@ -82,14 +89,60 @@ typedef pthread_key_t __libc_key_t;
#define _LIBC_LOCK_RECURSIVE_INITIALIZER \
{PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
+#define __rtld_lock_define_initialized_recursive(CLASS,NAME) \
+ CLASS __rtld_lock_recursive_t NAME = _RTLD_LOCK_RECURSIVE_INITIALIZER;
+#define _RTLD_LOCK_RECURSIVE_INITIALIZER \
+ {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
+
+#if defined _LIBC && defined IS_IN_libpthread
+# define __libc_maybe_call(FUNC, ARGS, ELSE) FUNC ARGS
+#else
+# if defined __PIC__ || (defined _LIBC && defined SHARED)
+# define __libc_maybe_call(FUNC, ARGS, ELSE) \
+ (__extension__ ({ __typeof (FUNC) *_fn = (FUNC); \
+ _fn != NULL ? (*_fn) ARGS : ELSE; }))
+# else
+# define __libc_maybe_call(FUNC, ARGS, ELSE) \
+ (FUNC != NULL ? FUNC ARGS : ELSE)
+# endif
+#endif
+#if defined _LIBC && !defined NOT_IN_libc && defined SHARED
+# define __libc_maybe_call2(FUNC, ARGS, ELSE) \
+ ({__builtin_expect (__libc_pthread_functions.ptr_##FUNC != NULL, 0) \
+ ? __libc_pthread_functions.ptr_##FUNC ARGS : ELSE; })
+#else
+# define __libc_maybe_call2(FUNC, ARGS, ELSE) __libc_maybe_call (__##FUNC, ARGS, ELSE)
+#endif
+
/* Initialize the named lock variable, leaving it in a consistent, unlocked
state. */
+#if defined _LIBC && !defined NOT_IN_libc && defined SHARED
#define __libc_lock_init(NAME) \
- (__pthread_mutex_init != NULL ? __pthread_mutex_init (&(NAME), NULL) : 0);
+ ({ \
+ (NAME).__m_count = 0; \
+ (NAME).__m_owner = NULL; \
+ (NAME).__m_kind = PTHREAD_MUTEX_TIMED_NP; \
+ (NAME).__m_lock.__status = 0; \
+ (NAME).__m_lock.__spinlock = __LT_SPINLOCK_INIT; \
+ 0; })
+#else
+#define __libc_lock_init(NAME) \
+ (__libc_maybe_call2 (pthread_mutex_init, (&(NAME), NULL), 0))
+#endif
#define __libc_rwlock_init(NAME) \
- (__pthread_rwlock_init != NULL ? __pthread_rwlock_init (&(NAME), NULL) : 0);
+ (__libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0));
/* Same as last but this time we initialize a recursive mutex. */
+#if defined _LIBC && !defined NOT_IN_libc && defined SHARED
+#define __libc_lock_init_recursive(NAME) \
+ ({ \
+ (NAME).mutex.__m_count = 0; \
+ (NAME).mutex.__m_owner = NULL; \
+ (NAME).mutex.__m_kind = PTHREAD_MUTEX_RECURSIVE_NP; \
+ (NAME).mutex.__m_lock.__status = 0; \
+ (NAME).mutex.__m_lock.__spinlock = __LT_SPINLOCK_INIT; \
+ 0; })
+#else
#define __libc_lock_init_recursive(NAME) \
do { \
if (__pthread_mutex_init != NULL) \
@@ -101,52 +154,55 @@ typedef pthread_key_t __libc_key_t;
__pthread_mutexattr_destroy (&__attr); \
} \
} while (0);
+#endif
+#define __rtld_lock_init_recursive(NAME) \
+ __libc_lock_init_recursive (NAME)
/* Finalize the named lock variable, which must be locked. It cannot be
used again until __libc_lock_init is called again on it. This must be
called on a lock variable before the containing storage is reused. */
#define __libc_lock_fini(NAME) \
- (__pthread_mutex_destroy != NULL ? __pthread_mutex_destroy (&(NAME)) : 0);
+ (__libc_maybe_call2 (pthread_mutex_destroy, (&(NAME)), 0));
#define __libc_rwlock_fini(NAME) \
- (__pthread_rwlock_destroy != NULL ? __pthread_rwlock_destroy (&(NAME)) : 0);
+ (__libc_maybe_call (__pthread_rwlock_destroy, (&(NAME)), 0));
/* Finalize recursive named lock. */
#define __libc_lock_fini_recursive(NAME) __libc_lock_fini ((NAME).mutex)
+#define __rtld_lock_fini_recursive(NAME) __libc_lock_fini_recursive (NAME)
/* Lock the named lock variable. */
#define __libc_lock_lock(NAME) \
- (__pthread_mutex_lock != NULL ? __pthread_mutex_lock (&(NAME)) : 0);
+ (__libc_maybe_call2 (pthread_mutex_lock, (&(NAME)), 0));
#define __libc_rwlock_rdlock(NAME) \
- (__pthread_rwlock_rdlock != NULL ? __pthread_rwlock_rdlock (&(NAME)) : 0);
+ (__libc_maybe_call (__pthread_rwlock_rdlock, (&(NAME)), 0));
#define __libc_rwlock_wrlock(NAME) \
- (__pthread_rwlock_wrlock != NULL ? __pthread_rwlock_wrlock (&(NAME)) : 0);
+ (__libc_maybe_call (__pthread_rwlock_wrlock, (&(NAME)), 0));
/* Lock the recursive named lock variable. */
#define __libc_lock_lock_recursive(NAME) __libc_lock_lock ((NAME).mutex)
/* Try to lock the named lock variable. */
#define __libc_lock_trylock(NAME) \
- (__pthread_mutex_trylock != NULL ? __pthread_mutex_trylock (&(NAME)) : 0)
+ (__libc_maybe_call2 (pthread_mutex_trylock, (&(NAME)), 0))
#define __libc_rwlock_tryrdlock(NAME) \
- (__pthread_rwlock_tryrdlock != NULL \
- ? __pthread_rwlock_tryrdlock (&(NAME)) : 0)
+ (__libc_maybe_call (__pthread_rwlock_tryrdlock, (&(NAME)), 0))
#define __libc_rwlock_trywrlock(NAME) \
- (__pthread_rwlock_trywrlock != NULL \
- ? __pthread_rwlock_trywrlock (&(NAME)) : 0)
+ (__libc_maybe_call (__pthread_rwlock_trywrlock, (&(NAME)), 0))
/* Try to lock the recursive named lock variable. */
#define __libc_lock_trylock_recursive(NAME) __libc_lock_trylock ((NAME).mutex)
+#define __rtld_lock_trylock_recursive(NAME) \
+ __libc_lock_trylock_recursive (NAME)
/* Unlock the named lock variable. */
#define __libc_lock_unlock(NAME) \
- (__pthread_mutex_unlock != NULL ? __pthread_mutex_unlock (&(NAME)) : 0);
+ (__libc_maybe_call2 (pthread_mutex_unlock, (&(NAME)), 0));
#define __libc_rwlock_unlock(NAME) \
- (__pthread_rwlock_unlock != NULL ? __pthread_rwlock_unlock (&(NAME)) : 0);
+ (__libc_maybe_call (__pthread_rwlock_unlock, (&(NAME)), 0));
/* Unlock the recursive named lock variable. */
#define __libc_lock_unlock_recursive(NAME) __libc_lock_unlock ((NAME).mutex)
-
/* Define once control variable. */
#if PTHREAD_ONCE_INIT == 0
/* Special case for static variables where we can avoid the initialization
@@ -191,22 +247,30 @@ typedef pthread_key_t __libc_key_t;
_pthread_cleanup_pop_restore (&_buffer, (DOIT)); \
}
+#define __libc_cleanup_push(fct, arg) \
+ { struct _pthread_cleanup_buffer _buffer; \
+ __libc_maybe_call (_pthread_cleanup_push, (&_buffer, (fct), (arg)), 0)
+
+#define __libc_cleanup_pop(execute) \
+ __libc_maybe_call (_pthread_cleanup_pop, (&_buffer, execute), 0); \
+ }
+
/* Create thread-specific key. */
#define __libc_key_create(KEY, DESTRUCTOR) \
- (__pthread_key_create != NULL ? __pthread_key_create (KEY, DESTRUCTOR) : 1)
+ (__libc_maybe_call (__pthread_key_create, (KEY, DESTRUCTOR), 1))
/* Get thread-specific data. */
#define __libc_getspecific(KEY) \
- (__pthread_getspecific != NULL ? __pthread_getspecific (KEY) : NULL)
+ (__libc_maybe_call (__pthread_getspecific, (KEY), NULL))
/* Set thread-specific data. */
#define __libc_setspecific(KEY, VALUE) \
- (__pthread_setspecific != NULL ? __pthread_setspecific (KEY, VALUE) : 0)
+ (__libc_maybe_call (__pthread_setspecific, (KEY, VALUE), 0))
/* Register handlers to execute before and after `fork'. */
#define __libc_atfork(PREPARE, PARENT, CHILD) \
- (__pthread_atfork != NULL ? __pthread_atfork (PREPARE, PARENT, CHILD) : 0)
+ (__libc_maybe_call (__pthread_atfork, (PREPARE, PARENT, CHILD), 0))
/* Functions that are used by this file and are internal to the GNU C
library. */
@@ -267,11 +331,7 @@ extern int __pthread_atfork (void (*__prepare) (void),
single-threaded processes. */
#ifndef __NO_WEAK_PTHREAD_ALIASES
# ifdef weak_extern
-# if _LIBC
-# include <bp-sym.h>
-# else
-# define BP_SYM (sym) sym
-# endif
+# define BP_SYM(sym) sym
weak_extern (BP_SYM (__pthread_mutex_init))
weak_extern (BP_SYM (__pthread_mutex_destroy))
weak_extern (BP_SYM (__pthread_mutex_lock))
@@ -293,6 +353,8 @@ weak_extern (BP_SYM (__pthread_getspecific))
weak_extern (BP_SYM (__pthread_once))
weak_extern (__pthread_initialize)
weak_extern (__pthread_atfork)
+weak_extern (BP_SYM (_pthread_cleanup_push))
+weak_extern (BP_SYM (_pthread_cleanup_pop))
weak_extern (BP_SYM (_pthread_cleanup_push_defer))
weak_extern (BP_SYM (_pthread_cleanup_pop_restore))
# else
@@ -318,6 +380,8 @@ weak_extern (BP_SYM (_pthread_cleanup_pop_restore))
# pragma weak __pthread_atfork
# pragma weak _pthread_cleanup_push_defer
# pragma weak _pthread_cleanup_pop_restore
+# pragma weak _pthread_cleanup_push
+# pragma weak _pthread_cleanup_pop
# endif
#endif
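
For context, libc-internal code consumes these macros rather than calling pthread functions directly, so the same source works whether or not libpthread is present. A hedged usage sketch under the assumption that the header is reachable as <bits/libc-lock.h> and that the code is built as part of libc (the function and variable names here are invented for illustration):

#include <bits/libc-lock.h>

/* Hypothetical libc-internal state guarded by one of these locks.  */
static __libc_lock_t example_lock;
static int example_counter;

static void example_init (void)
{
  /* Forwards to pthread_mutex_init via the table in shared libc;
     initializes the mutex fields inline otherwise.  */
  __libc_lock_init (example_lock);
}

static int example_bump (void)
{
  int value;
  __libc_lock_lock (example_lock);    /* no-op (returns 0) if libpthread is absent */
  value = ++example_counter;
  __libc_lock_unlock (example_lock);
  return value;
}
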