author    | Waldemar Brodkorb <wbx@uclibc-ng.org> | 2016-05-18 18:41:24 +0200
committer | Waldemar Brodkorb <wbx@uclibc-ng.org> | 2016-05-18 18:41:24 +0200
commit    | 6a8ccc95528f5e86a8770ed15ce89609b5b3dee9 (patch)
tree      | bbd4df35b4d4a6a8b00d7a5e61fb2668b850ad62 /libpthread/linuxthreads/pthread.c
parent    | 398a27a5b323956344b4f831d892fed3bd9813c7 (diff)
remove linuxthreads.new, rename linuxthreads.old
Linuxthreads.new isn't really useful now that NPTL/TLS exists for the
well-supported architectures. There is no reason to use LT.new for
ARM/MIPS or other architectures supporting NPTL/TLS, and it is not
available for noMMU architectures like Blackfin or FR-V. To simplify
the life of the few uClibc-ng developers, LT.new is removed and LT.old
is renamed to LT.
Config symbol rename: LINUXTHREADS_OLD -> UCLIBC_HAS_LINUXTHREADS
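For downstream builds this amounts to a one-for-one option rename. A minimal sketch of how an existing uClibc-ng .config fragment changes (illustrative only; the surrounding options and defaults depend on the rest of the configuration):

    # before this commit (LT.old selected)
    LINUXTHREADS_OLD=y
    # after this commit
    UCLIBC_HAS_LINUXTHREADS=y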
Diffstat (limited to 'libpthread/linuxthreads/pthread.c')
-rw-r--r-- | libpthread/linuxthreads/pthread.c | 1318
1 file changed, 550 insertions, 768 deletions
diff --git a/libpthread/linuxthreads/pthread.c b/libpthread/linuxthreads/pthread.c index 5dccd939f..00197b158 100644 --- a/libpthread/linuxthreads/pthread.c +++ b/libpthread/linuxthreads/pthread.c @@ -1,4 +1,3 @@ - /* Linuxthreads - a simple clone()-based implementation of Posix */ /* threads for Linux. */ /* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */ @@ -16,6 +15,7 @@ /* Thread creation, initialization, and basic low-level routines */ #include <errno.h> +#include <netdb.h> /* for h_errno */ #include <stddef.h> #include <stdio.h> #include <stdlib.h> @@ -24,134 +24,176 @@ #include <fcntl.h> #include <sys/wait.h> #include <sys/resource.h> -#include <sys/time.h> #include "pthread.h" #include "internals.h" #include "spinlock.h" #include "restart.h" -#include "smp.h" -#include <not-cancel.h> +#include "debug.h" /* added to linuxthreads -StS */ -/* Sanity check. */ -#if !defined __SIGRTMIN || (__SIGRTMAX - __SIGRTMIN) < 3 -# error "This must not happen" -#endif -#ifndef __UCLIBC_HAS_TLS__ +/* Mods for uClibc: Some includes */ +#include <signal.h> +#include <sys/types.h> +#include <sys/syscall.h> + +libpthread_hidden_proto(waitpid) +libpthread_hidden_proto(raise) + /* These variables are used by the setup code. */ extern int _errno; extern int _h_errno; -# if defined __UCLIBC_HAS_RESOLVER_SUPPORT__ -/* We need the global/static resolver state here. */ -# include <resolv.h> -# undef _res -extern struct __res_state *__resp; -# endif -#endif - -#ifdef __UCLIBC_HAS_TLS__ - -/* We need only a few variables. */ -#define manager_thread __pthread_manager_threadp -pthread_descr __pthread_manager_threadp attribute_hidden; - -#else /* Descriptor of the initial thread */ struct _pthread_descr_struct __pthread_initial_thread = { - .p_header.data.self = &__pthread_initial_thread, - .p_nextlive = &__pthread_initial_thread, - .p_prevlive = &__pthread_initial_thread, - .p_tid = PTHREAD_THREADS_MAX, - .p_lock = &__pthread_handles[0].h_lock, - .p_start_args = PTHREAD_START_ARGS_INITIALIZER(NULL), -#ifndef __UCLIBC_HAS_TLS__ - .p_errnop = &_errno, - .p_h_errnop = &_h_errno, -#endif - .p_userstack = 1, - .p_resume_count = __ATOMIC_INITIALIZER, - .p_alloca_cutoff = __MAX_ALLOCA_CUTOFF + &__pthread_initial_thread, /* pthread_descr p_nextlive */ + &__pthread_initial_thread, /* pthread_descr p_prevlive */ + NULL, /* pthread_descr p_nextwaiting */ + NULL, /* pthread_descr p_nextlock */ + PTHREAD_THREADS_MAX, /* pthread_t p_tid */ + 0, /* int p_pid */ + 0, /* int p_priority */ + &__pthread_handles[0].h_lock, /* struct _pthread_fastlock * p_lock */ + 0, /* int p_signal */ + NULL, /* sigjmp_buf * p_signal_buf */ + NULL, /* sigjmp_buf * p_cancel_buf */ + 0, /* char p_terminated */ + 0, /* char p_detached */ + 0, /* char p_exited */ + NULL, /* void * p_retval */ + 0, /* int p_retval */ + NULL, /* pthread_descr p_joining */ + NULL, /* struct _pthread_cleanup_buffer * p_cleanup */ + 0, /* char p_cancelstate */ + 0, /* char p_canceltype */ + 0, /* char p_canceled */ + &_errno, /* int *p_errnop */ + 0, /* int p_errno */ + &_h_errno, /* int *p_h_errnop */ + 0, /* int p_h_errno */ + NULL, /* char * p_in_sighandler */ + 0, /* char p_sigwaiting */ + PTHREAD_START_ARGS_INITIALIZER, /* struct pthread_start_args p_start_args */ + {NULL}, /* void ** p_specific[PTHREAD_KEY_1STLEVEL_SIZE] */ + {NULL}, /* void * p_libc_specific[_LIBC_TSD_KEY_N] */ + 0, /* int p_userstack */ + NULL, /* void * p_guardaddr */ + 0, /* size_t p_guardsize */ + &__pthread_initial_thread, /* pthread_descr p_self */ + 0, /* Always index 0 */ + 0, /* 
int p_report_events */ + {{{0, }}, 0, NULL}, /* td_eventbuf_t p_eventbuf */ + __ATOMIC_INITIALIZER, /* struct pthread_atomic p_resume_count */ + 0, /* char p_woken_by_cancel */ + 0, /* char p_condvar_avail */ + 0, /* char p_sem_avail */ + NULL, /* struct pthread_extricate_if *p_extricate */ + NULL, /* pthread_readlock_info *p_readlock_list; */ + NULL, /* pthread_readlock_info *p_readlock_free; */ + 0 /* int p_untracked_readlock_count; */ +#ifdef __UCLIBC_HAS_XLOCALE__ + , + &__global_locale_data, /* __locale_t locale; */ +#endif /* __UCLIBC_HAS_XLOCALE__ */ }; /* Descriptor of the manager thread; none of this is used but the error variables, the p_pid and p_priority fields, and the address for identification. */ - #define manager_thread (&__pthread_manager_thread) struct _pthread_descr_struct __pthread_manager_thread = { - .p_header.data.self = &__pthread_manager_thread, - .p_header.data.multiple_threads = 1, - .p_lock = &__pthread_handles[1].h_lock, - .p_start_args = PTHREAD_START_ARGS_INITIALIZER(__pthread_manager), -#ifndef __UCLIBC_HAS_TLS__ - .p_errnop = &__pthread_manager_thread.p_errno, -#endif - .p_nr = 1, - .p_resume_count = __ATOMIC_INITIALIZER, - .p_alloca_cutoff = PTHREAD_STACK_MIN / 4 + NULL, /* pthread_descr p_nextlive */ + NULL, /* pthread_descr p_prevlive */ + NULL, /* pthread_descr p_nextwaiting */ + NULL, /* pthread_descr p_nextlock */ + 0, /* int p_tid */ + 0, /* int p_pid */ + 0, /* int p_priority */ + &__pthread_handles[1].h_lock, /* struct _pthread_fastlock * p_lock */ + 0, /* int p_signal */ + NULL, /* sigjmp_buf * p_signal_buf */ + NULL, /* sigjmp_buf * p_cancel_buf */ + 0, /* char p_terminated */ + 0, /* char p_detached */ + 0, /* char p_exited */ + NULL, /* void * p_retval */ + 0, /* int p_retval */ + NULL, /* pthread_descr p_joining */ + NULL, /* struct _pthread_cleanup_buffer * p_cleanup */ + 0, /* char p_cancelstate */ + 0, /* char p_canceltype */ + 0, /* char p_canceled */ + &__pthread_manager_thread.p_errno, /* int *p_errnop */ + 0, /* int p_errno */ + NULL, /* int *p_h_errnop */ + 0, /* int p_h_errno */ + NULL, /* char * p_in_sighandler */ + 0, /* char p_sigwaiting */ + PTHREAD_START_ARGS_INITIALIZER, /* struct pthread_start_args p_start_args */ + {NULL}, /* void ** p_specific[PTHREAD_KEY_1STLEVEL_SIZE] */ + {NULL}, /* void * p_libc_specific[_LIBC_TSD_KEY_N] */ + 0, /* int p_userstack */ + NULL, /* void * p_guardaddr */ + 0, /* size_t p_guardsize */ + &__pthread_manager_thread, /* pthread_descr p_self */ + 1, /* Always index 1 */ + 0, /* int p_report_events */ + {{{0, }}, 0, NULL}, /* td_eventbuf_t p_eventbuf */ + __ATOMIC_INITIALIZER, /* struct pthread_atomic p_resume_count */ + 0, /* char p_woken_by_cancel */ + 0, /* char p_condvar_avail */ + 0, /* char p_sem_avail */ + NULL, /* struct pthread_extricate_if *p_extricate */ + NULL, /* pthread_readlock_info *p_readlock_list; */ + NULL, /* pthread_readlock_info *p_readlock_free; */ + 0 /* int p_untracked_readlock_count; */ +#ifdef __UCLIBC_HAS_XLOCALE__ + , + &__global_locale_data, /* __locale_t locale; */ +#endif /* __UCLIBC_HAS_XLOCALE__ */ }; -#endif /* Pointer to the main thread (the father of the thread manager thread) */ /* Originally, this is the initial thread, but this changes after fork() */ -#ifdef __UCLIBC_HAS_TLS__ -pthread_descr __pthread_main_thread; -#else pthread_descr __pthread_main_thread = &__pthread_initial_thread; -#endif /* Limit between the stack of the initial thread (above) and the stacks of other threads (below). Aligned on a STACK_SIZE boundary. 
*/ -char *__pthread_initial_thread_bos; +char *__pthread_initial_thread_bos = NULL; + +#ifndef __ARCH_USE_MMU__ +/* See nommu notes in internals.h and pthread_initialize() below. */ +char *__pthread_initial_thread_tos = NULL; +char *__pthread_initial_thread_mid = NULL; +#endif /* __ARCH_USE_MMU__ */ /* File descriptor for sending requests to the thread manager. */ /* Initially -1, meaning that the thread manager is not running. */ int __pthread_manager_request = -1; -int __pthread_multiple_threads attribute_hidden; - /* Other end of the pipe for sending requests to the thread manager. */ int __pthread_manager_reader; /* Limits of the thread manager stack */ -char *__pthread_manager_thread_bos; -char *__pthread_manager_thread_tos; +char *__pthread_manager_thread_bos = NULL; +char *__pthread_manager_thread_tos = NULL; /* For process-wide exit() */ -int __pthread_exit_requested; -int __pthread_exit_code; - -/* Maximum stack size. */ -size_t __pthread_max_stacksize; - -/* Nozero if the machine has more than one processor. */ -int __pthread_smp_kernel; - - -#if !__ASSUME_REALTIME_SIGNALS -/* Pointers that select new or old suspend/resume functions - based on availability of rt signals. */ - -void (*__pthread_restart)(pthread_descr) = __pthread_restart_old; -void (*__pthread_suspend)(pthread_descr) = __pthread_suspend_old; -int (*__pthread_timedsuspend)(pthread_descr, const struct timespec *) = __pthread_timedsuspend_old; -#endif /* __ASSUME_REALTIME_SIGNALS */ +int __pthread_exit_requested = 0; +int __pthread_exit_code = 0; /* Communicate relevant LinuxThreads constants to gdb */ const int __pthread_threads_max = PTHREAD_THREADS_MAX; const int __pthread_sizeof_handle = sizeof(struct pthread_handle_struct); -const int __pthread_offsetof_descr = offsetof(struct pthread_handle_struct, - h_descr); +const int __pthread_offsetof_descr = offsetof(struct pthread_handle_struct, h_descr); const int __pthread_offsetof_pid = offsetof(struct _pthread_descr_struct, p_pid); const int __linuxthreads_pthread_sizeof_descr @@ -162,53 +204,72 @@ const int __linuxthreads_initial_report_events; const char __linuxthreads_version[] = VERSION; /* Forward declarations */ - static void pthread_onexit_process(int retcode, void *arg); -#ifndef HAVE_Z_NODELETE -static void pthread_atexit_process(void *arg, int retcode); -static void pthread_atexit_retcode(void *arg, int retcode); -#endif static void pthread_handle_sigcancel(int sig); static void pthread_handle_sigrestart(int sig); static void pthread_handle_sigdebug(int sig); +int __pthread_timedsuspend_new(pthread_descr self, const struct timespec *abstime); /* Signal numbers used for the communication. In these variables we keep track of the used variables. If the platform does not support any real-time signals we will define the values to some unreasonable value which will signal failing of all the functions below. 
*/ +#ifndef __NR_rt_sigaction +static int current_rtmin = -1; +static int current_rtmax = -1; +int __pthread_sig_restart = SIGUSR1; +int __pthread_sig_cancel = SIGUSR2; +int __pthread_sig_debug; +#else + +#if __SIGRTMAX - __SIGRTMIN >= 3 +static int current_rtmin = __SIGRTMIN + 3; +static int current_rtmax = __SIGRTMAX; int __pthread_sig_restart = __SIGRTMIN; int __pthread_sig_cancel = __SIGRTMIN + 1; int __pthread_sig_debug = __SIGRTMIN + 2; +void (*__pthread_restart)(pthread_descr) = __pthread_restart_new; +void (*__pthread_suspend)(pthread_descr) = __pthread_wait_for_restart_signal; +int (*__pthread_timedsuspend)(pthread_descr, const struct timespec *) = __pthread_timedsuspend_new; +#else +static int current_rtmin = __SIGRTMIN; +static int current_rtmax = __SIGRTMAX; +int __pthread_sig_restart = SIGUSR1; +int __pthread_sig_cancel = SIGUSR2; +int __pthread_sig_debug; +void (*__pthread_restart)(pthread_descr) = __pthread_restart_old; +void (*__pthread_suspend)(pthread_descr) = __pthread_suspend_old; +int (*__pthread_timedsuspend)(pthread_descr, const struct timespec *) = __pthread_timedsuspend_old; -extern int __libc_current_sigrtmin_private (void); - -#if !__ASSUME_REALTIME_SIGNALS -static int rtsigs_initialized; +#endif -static void -init_rtsigs (void) +/* Return number of available real-time signal with highest priority. */ +int __libc_current_sigrtmin (void) { - if (rtsigs_initialized) - return; + return current_rtmin; +} - if (__libc_current_sigrtmin_private () == -1) - { - __pthread_sig_restart = SIGUSR1; - __pthread_sig_cancel = SIGUSR2; - __pthread_sig_debug = 0; - } - else - { - __pthread_restart = __pthread_restart_new; - __pthread_suspend = __pthread_wait_for_restart_signal; - __pthread_timedsuspend = __pthread_timedsuspend_new; - } +/* Return number of available real-time signal with lowest priority. */ +int __libc_current_sigrtmax (void) +{ + return current_rtmax; +} - rtsigs_initialized = 1; +#if 0 +/* Allocate real-time signal with highest/lowest available + priority. Please note that we don't use a lock since we assume + this function to be called at program start. */ +int __libc_allocate_rtsig (int high); +int __libc_allocate_rtsig (int high) +{ + if (current_rtmin == -1 || current_rtmin > current_rtmax) + /* We don't have anymore signal available. */ + return -1; + return high ? current_rtmin++ : current_rtmax--; } #endif - +#endif /* Initialize the pthread library. 
Initialization is split in two functions: @@ -219,14 +280,40 @@ init_rtsigs (void) static void pthread_initialize(void) __attribute__((constructor)); -#ifndef HAVE_Z_NODELETE -extern void *__dso_handle __attribute__ ((weak)); -#endif - - -#if defined __UCLIBC_HAS_TLS__ && !defined SHARED -extern void __libc_setup_tls (size_t tcbsize, size_t tcbalign); -#endif +libpthread_hidden_proto(pthread_attr_destroy) +libpthread_hidden_proto(pthread_attr_init) +libpthread_hidden_proto(pthread_attr_getdetachstate) +libpthread_hidden_proto(pthread_attr_setdetachstate) +libpthread_hidden_proto(pthread_attr_getinheritsched) +libpthread_hidden_proto(pthread_attr_setinheritsched) +libpthread_hidden_proto(pthread_attr_setschedparam) +libpthread_hidden_proto(pthread_attr_getschedparam) +libpthread_hidden_proto(pthread_attr_getschedpolicy) +libpthread_hidden_proto(pthread_attr_setschedpolicy) +libpthread_hidden_proto(pthread_attr_getscope) +libpthread_hidden_proto(pthread_attr_setscope) + +libpthread_hidden_proto(pthread_exit) + +libpthread_hidden_proto(pthread_equal) +libpthread_hidden_proto(pthread_self) +libpthread_hidden_proto(pthread_getschedparam) +libpthread_hidden_proto(pthread_setschedparam) + +libpthread_hidden_proto(pthread_setcancelstate) +libpthread_hidden_proto(pthread_setcanceltype) +libpthread_hidden_proto(_pthread_cleanup_push_defer) +libpthread_hidden_proto(_pthread_cleanup_pop_restore) + +libpthread_hidden_proto(pthread_cond_broadcast) +libpthread_hidden_proto(pthread_cond_destroy) +libpthread_hidden_proto(pthread_cond_init) +libpthread_hidden_proto(pthread_cond_signal) +libpthread_hidden_proto(pthread_cond_wait) +libpthread_hidden_proto(pthread_cond_timedwait) + +libpthread_hidden_proto(pthread_condattr_destroy) +libpthread_hidden_proto(pthread_condattr_init) struct pthread_functions __pthread_functions = { @@ -235,49 +322,53 @@ struct pthread_functions __pthread_functions = .ptr_pthread_internal_tsd_get = __pthread_internal_tsd_get, .ptr_pthread_internal_tsd_address = __pthread_internal_tsd_address, #endif +/* .ptr_pthread_fork = __pthread_fork, - .ptr_pthread_attr_destroy = __pthread_attr_destroy, - .ptr_pthread_attr_init = __pthread_attr_init, - .ptr_pthread_attr_getdetachstate = __pthread_attr_getdetachstate, - .ptr_pthread_attr_setdetachstate = __pthread_attr_setdetachstate, - .ptr_pthread_attr_getinheritsched = __pthread_attr_getinheritsched, - .ptr_pthread_attr_setinheritsched = __pthread_attr_setinheritsched, - .ptr_pthread_attr_getschedparam = __pthread_attr_getschedparam, - .ptr_pthread_attr_setschedparam = __pthread_attr_setschedparam, - .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy, - .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy, - .ptr_pthread_attr_getscope = __pthread_attr_getscope, - .ptr_pthread_attr_setscope = __pthread_attr_setscope, - .ptr_pthread_condattr_destroy = __pthread_condattr_destroy, - .ptr_pthread_condattr_init = __pthread_condattr_init, - .ptr_pthread_cond_broadcast = __pthread_cond_broadcast, - .ptr_pthread_cond_destroy = __pthread_cond_destroy, - .ptr_pthread_cond_init = __pthread_cond_init, - .ptr_pthread_cond_signal = __pthread_cond_signal, - .ptr_pthread_cond_wait = __pthread_cond_wait, - .ptr_pthread_cond_timedwait = __pthread_cond_timedwait, - .ptr_pthread_equal = __pthread_equal, - .ptr___pthread_exit = __pthread_exit, - .ptr_pthread_getschedparam = __pthread_getschedparam, - .ptr_pthread_setschedparam = __pthread_setschedparam, +*/ + .ptr_pthread_attr_destroy = pthread_attr_destroy, + .ptr_pthread_attr_init = 
pthread_attr_init, + .ptr_pthread_attr_getdetachstate = pthread_attr_getdetachstate, + .ptr_pthread_attr_setdetachstate = pthread_attr_setdetachstate, + .ptr_pthread_attr_getinheritsched = pthread_attr_getinheritsched, + .ptr_pthread_attr_setinheritsched = pthread_attr_setinheritsched, + .ptr_pthread_attr_getschedparam = pthread_attr_getschedparam, + .ptr_pthread_attr_setschedparam = pthread_attr_setschedparam, + .ptr_pthread_attr_getschedpolicy = pthread_attr_getschedpolicy, + .ptr_pthread_attr_setschedpolicy = pthread_attr_setschedpolicy, + .ptr_pthread_attr_getscope = pthread_attr_getscope, + .ptr_pthread_attr_setscope = pthread_attr_setscope, + .ptr_pthread_condattr_destroy = pthread_condattr_destroy, + .ptr_pthread_condattr_init = pthread_condattr_init, + .ptr_pthread_cond_broadcast = pthread_cond_broadcast, + .ptr_pthread_cond_destroy = pthread_cond_destroy, + .ptr_pthread_cond_init = pthread_cond_init, + .ptr_pthread_cond_signal = pthread_cond_signal, + .ptr_pthread_cond_wait = pthread_cond_wait, + .ptr_pthread_cond_timedwait = pthread_cond_timedwait, + .ptr_pthread_equal = pthread_equal, + .ptr___pthread_exit = pthread_exit, + .ptr_pthread_getschedparam = pthread_getschedparam, + .ptr_pthread_setschedparam = pthread_setschedparam, .ptr_pthread_mutex_destroy = __pthread_mutex_destroy, .ptr_pthread_mutex_init = __pthread_mutex_init, .ptr_pthread_mutex_lock = __pthread_mutex_lock, .ptr_pthread_mutex_trylock = __pthread_mutex_trylock, .ptr_pthread_mutex_unlock = __pthread_mutex_unlock, - .ptr_pthread_self = __pthread_self, - .ptr_pthread_setcancelstate = __pthread_setcancelstate, - .ptr_pthread_setcanceltype = __pthread_setcanceltype, - .ptr_pthread_do_exit = __pthread_do_exit, - .ptr_pthread_thread_self = __pthread_thread_self, - .ptr_pthread_cleanup_upto = __pthread_cleanup_upto, - .ptr_pthread_sigaction = __pthread_sigaction, - .ptr_pthread_sigwait = __pthread_sigwait, - .ptr_pthread_raise = __pthread_raise, + .ptr_pthread_self = pthread_self, + .ptr_pthread_setcancelstate = pthread_setcancelstate, + .ptr_pthread_setcanceltype = pthread_setcanceltype, +/* + .ptr_pthread_do_exit = pthread_do_exit, + .ptr_pthread_thread_self = pthread_thread_self, + .ptr_pthread_cleanup_upto = pthread_cleanup_upto, + .ptr_pthread_sigaction = pthread_sigaction, + .ptr_pthread_sigwait = pthread_sigwait, + .ptr_pthread_raise = pthread_raise, .ptr__pthread_cleanup_push = _pthread_cleanup_push, - .ptr__pthread_cleanup_push_defer = _pthread_cleanup_push_defer, .ptr__pthread_cleanup_pop = _pthread_cleanup_pop, - .ptr__pthread_cleanup_pop_restore = _pthread_cleanup_pop_restore, +*/ + .ptr__pthread_cleanup_push_defer = __pthread_cleanup_push_defer, + .ptr__pthread_cleanup_pop_restore = __pthread_cleanup_pop_restore }; #ifdef SHARED # define ptr_pthread_functions &__pthread_functions @@ -287,224 +378,28 @@ struct pthread_functions __pthread_functions = static int *__libc_multiple_threads_ptr; -/* Do some minimal initialization which has to be done during the - startup of the C library. */ -void -__pthread_initialize_minimal(void) + /* Do some minimal initialization which has to be done during the + startup of the C library. */ +void __pthread_initialize_minimal(void) { -#ifdef __UCLIBC_HAS_TLS__ - pthread_descr self; - - /* First of all init __pthread_handles[0] and [1] if needed. 
*/ -# if __LT_SPINLOCK_INIT != 0 - __pthread_handles[0].h_lock = __LOCK_INITIALIZER; - __pthread_handles[1].h_lock = __LOCK_INITIALIZER; -# endif -# ifndef SHARED - /* Unlike in the dynamically linked case the dynamic linker has not - taken care of initializing the TLS data structures. */ - __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN); -# elif !defined __UCLIBC_HAS_TLS__ - if (__builtin_expect (GL(dl_tls_dtv_slotinfo_list) == NULL, 0)) - { - tcbhead_t *tcbp; - - /* There is no actual TLS being used, so the thread register - was not initialized in the dynamic linker. */ - - /* We need to install special hooks so that the malloc and memalign - calls in _dl_tls_setup and _dl_allocate_tls won't cause full - malloc initialization that will try to set up its thread state. */ - - extern void __libc_malloc_pthread_startup (bool first_time); - __libc_malloc_pthread_startup (true); - - if (__builtin_expect (_dl_tls_setup (), 0) - || __builtin_expect ((tcbp = _dl_allocate_tls (NULL)) == NULL, 0)) - { - static const char msg[] = "\ -cannot allocate TLS data structures for initial thread\n"; - TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO, - msg, sizeof msg - 1)); - abort (); - } - const char *lossage = TLS_INIT_TP (tcbp, 0); - if (__builtin_expect (lossage != NULL, 0)) - { - static const char msg[] = "cannot set up thread-local storage: "; - const char nl = '\n'; - TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO, - msg, sizeof msg - 1)); - TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO, - lossage, strlen (lossage))); - TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO, &nl, 1)); - } - - /* Though it was allocated with libc's malloc, that was done without - the user's __malloc_hook installed. A later realloc that uses - the hooks might not work with that block from the plain malloc. - So we record this block as unfreeable just as the dynamic linker - does when it allocates the DTV before the libc malloc exists. */ - GL(dl_initial_dtv) = GET_DTV (tcbp); - - __libc_malloc_pthread_startup (false); - } -# endif - - self = THREAD_SELF; - - /* The memory for the thread descriptor was allocated elsewhere as - part of the TLS allocation. We have to initialize the data - structure by hand. This initialization must mirror the struct - definition above. */ - self->p_nextlive = self->p_prevlive = self; - self->p_tid = PTHREAD_THREADS_MAX; - self->p_lock = &__pthread_handles[0].h_lock; -# ifndef __UCLIBC_HAS_TLS__ - self->p_errnop = &_errno; - self->p_h_errnop = &_h_errno; -# endif - /* self->p_start_args need not be initialized, it's all zero. */ - self->p_userstack = 1; -# if __LT_SPINLOCK_INIT != 0 - self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER; -# endif - self->p_alloca_cutoff = __MAX_ALLOCA_CUTOFF; - - /* Another variable which points to the thread descriptor. */ - __pthread_main_thread = self; - - /* And fill in the pointer the the thread __pthread_handles array. */ - __pthread_handles[0].h_descr = self; - -#else /* __UCLIBC_HAS_TLS__ */ - - /* First of all init __pthread_handles[0] and [1]. */ -# if __LT_SPINLOCK_INIT != 0 - __pthread_handles[0].h_lock = __LOCK_INITIALIZER; - __pthread_handles[1].h_lock = __LOCK_INITIALIZER; -# endif - __pthread_handles[0].h_descr = &__pthread_initial_thread; - __pthread_handles[1].h_descr = &__pthread_manager_thread; - - /* If we have special thread_self processing, initialize that for the - main thread now. 
*/ -# ifdef INIT_THREAD_SELF - INIT_THREAD_SELF(&__pthread_initial_thread, 0); -# endif -#endif - -#if HP_TIMING_AVAIL -# ifdef __UCLIBC_HAS_TLS__ - self->p_cpuclock_offset = GL(dl_cpuclock_offset); -# else - __pthread_initial_thread.p_cpuclock_offset = GL(dl_cpuclock_offset); -# endif + /* If we have special thread_self processing, initialize + * that for the main thread now. */ +#ifdef INIT_THREAD_SELF + INIT_THREAD_SELF(&__pthread_initial_thread, 0); #endif - __libc_multiple_threads_ptr = __libc_pthread_init (ptr_pthread_functions); + __libc_multiple_threads_ptr = __libc_pthread_init (ptr_pthread_functions); } -void -__pthread_init_max_stacksize(void) -{ - struct rlimit limit; - size_t max_stack; - - getrlimit(RLIMIT_STACK, &limit); -#ifdef FLOATING_STACKS - if (limit.rlim_cur == RLIM_INFINITY) - limit.rlim_cur = ARCH_STACK_MAX_SIZE; -# ifdef NEED_SEPARATE_REGISTER_STACK - max_stack = limit.rlim_cur / 2; -# else - max_stack = limit.rlim_cur; -# endif -#else - /* Play with the stack size limit to make sure that no stack ever grows - beyond STACK_SIZE minus one page (to act as a guard page). */ -# ifdef NEED_SEPARATE_REGISTER_STACK - /* STACK_SIZE bytes hold both the main stack and register backing - store. The rlimit value applies to each individually. */ - max_stack = STACK_SIZE/2 - __getpagesize (); -# else - max_stack = STACK_SIZE - __getpagesize(); -# endif - if (limit.rlim_cur > max_stack) { - limit.rlim_cur = max_stack; - setrlimit(RLIMIT_STACK, &limit); - } -#endif - __pthread_max_stacksize = max_stack; - if (max_stack / 4 < __MAX_ALLOCA_CUTOFF) - { -#ifdef __UCLIBC_HAS_TLS__ - pthread_descr self = THREAD_SELF; - self->p_alloca_cutoff = max_stack / 4; -#else - __pthread_initial_thread.p_alloca_cutoff = max_stack / 4; -#endif - } -} - -#if defined SHARED && defined __UCLIBC_HAS_TLS__ -# ifdef __UCLIBC_HAS_TLS__ -/* When using __thread for this, we do it in libc so as not - to give libpthread its own TLS segment just for this. */ -extern void **__libc_dl_error_tsd (void) __attribute__ ((const)); -# else -static void ** __attribute__ ((const)) -__libc_dl_error_tsd (void) -{ - return &thread_self ()->p_libc_specific[_LIBC_TSD_KEY_DL_ERROR]; -} -# endif -#endif - -#ifdef __UCLIBC_HAS_TLS__ -static __inline__ void __attribute__((always_inline)) -init_one_static_tls (pthread_descr descr, struct link_map *map) -{ -# if defined(TLS_TCB_AT_TP) - dtv_t *dtv = GET_DTV (descr); - void *dest = (char *) descr - map->l_tls_offset; -# elif defined(TLS_DTV_AT_TP) - dtv_t *dtv = GET_DTV ((pthread_descr) ((char *) descr + TLS_PRE_TCB_SIZE)); - void *dest = (char *) descr + map->l_tls_offset + TLS_PRE_TCB_SIZE; -# else -# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined" -# endif - - /* Fill in the DTV slot so that a later LD/GD access will find it. */ - dtv[map->l_tls_modid].pointer.val = dest; - dtv[map->l_tls_modid].pointer.is_static = true; - - /* Initialize the memory. 
*/ - memset (mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size), - '\0', map->l_tls_blocksize - map->l_tls_initimage_size); -} - -static void -__pthread_init_static_tls (struct link_map *map) -{ - size_t i; - - for (i = 0; i < PTHREAD_THREADS_MAX; ++i) - if (__pthread_handles[i].h_descr != NULL && i != 1) - { - __pthread_lock (&__pthread_handles[i].h_lock, NULL); - if (__pthread_handles[i].h_descr != NULL) - init_one_static_tls (__pthread_handles[i].h_descr, map); - __pthread_unlock (&__pthread_handles[i].h_lock); - } -} -#endif - static void pthread_initialize(void) { struct sigaction sa; sigset_t mask; +#ifdef __ARCH_USE_MMU__ + struct rlimit limit; + rlim_t max_stack; +#endif /* If already done (e.g. by a constructor called earlier!), bail out */ if (__pthread_initial_thread_bos != NULL) return; @@ -512,44 +407,70 @@ static void pthread_initialize(void) /* Test if compare-and-swap is available */ __pthread_has_cas = compare_and_swap_is_available(); #endif -#ifdef FLOATING_STACKS - /* We don't need to know the bottom of the stack. Give the pointer some - value to signal that initialization happened. */ - __pthread_initial_thread_bos = (void *) -1l; -#else - /* Determine stack size limits . */ - __pthread_init_max_stacksize (); -# ifdef _STACK_GROWS_UP - /* The initial thread already has all the stack it needs */ - __pthread_initial_thread_bos = (char *) - ((long)CURRENT_STACK_FRAME &~ (STACK_SIZE - 1)); -# else /* For the initial stack, reserve at least STACK_SIZE bytes of stack below the current stack address, and align that on a STACK_SIZE boundary. */ __pthread_initial_thread_bos = (char *)(((long)CURRENT_STACK_FRAME - 2 * STACK_SIZE) & ~(STACK_SIZE - 1)); -# endif -#endif -#ifdef __UCLIBC_HAS_TLS__ - /* Update the descriptor for the initial thread. */ - THREAD_SETMEM (((pthread_descr) NULL), p_pid, __getpid()); -# if defined __UCLIBC_HAS_RESOLVER_SUPPORT__ - /* Likewise for the resolver state _res. */ - THREAD_SETMEM (((pthread_descr) NULL), p_resp, __resp); -# endif -#else /* Update the descriptor for the initial thread. */ - __pthread_initial_thread.p_pid = __getpid(); -# if defined __UCLIBC_HAS_RESOLVER_SUPPORT__ - /* Likewise for the resolver state _res. */ - __pthread_initial_thread.p_resp = __resp; -# endif -#endif -#if !__ASSUME_REALTIME_SIGNALS - /* Initialize real-time signals. */ - init_rtsigs (); + __pthread_initial_thread.p_pid = getpid(); + /* If we have special thread_self processing, initialize that for the + main thread now. */ +#ifdef INIT_THREAD_SELF + INIT_THREAD_SELF(&__pthread_initial_thread, 0); #endif + /* The errno/h_errno variable of the main thread are the global ones. */ + __pthread_initial_thread.p_errnop = &_errno; + __pthread_initial_thread.p_h_errnop = &_h_errno; + +#ifdef __UCLIBC_HAS_XLOCALE__ + /* The locale of the main thread is the current locale in use. */ + __pthread_initial_thread.locale = __curlocale_var; +#endif /* __UCLIBC_HAS_XLOCALE__ */ + + { /* uClibc-specific stdio initialization for threads. */ + FILE *fp; + + _stdio_user_locking = 0; /* 2 if threading not initialized */ + for (fp = _stdio_openlist; fp != NULL; fp = fp->__nextopen) { + if (fp->__user_locking != 1) { + fp->__user_locking = 0; + } + } + } + + /* Play with the stack size limit to make sure that no stack ever grows + beyond STACK_SIZE minus two pages (one page for the thread descriptor + immediately beyond, and one page to act as a guard page). 
*/ + +#ifdef __ARCH_USE_MMU__ + /* We cannot allocate a huge chunk of memory to mmap all thread stacks later + * on a non-MMU system. Thus, we don't need the rlimit either. -StS */ + getrlimit(RLIMIT_STACK, &limit); + max_stack = STACK_SIZE - 2 * getpagesize(); + if (limit.rlim_cur > max_stack) { + limit.rlim_cur = max_stack; + setrlimit(RLIMIT_STACK, &limit); + } +#else + /* For non-MMU, the initial thread stack can reside anywhere in memory. + * We don't have a way of knowing where the kernel started things -- top + * or bottom (well, that isn't exactly true, but the solution is fairly + * complex and error prone). All we can determine here is an address + * that lies within that stack. Save that address as a reference so that + * as other thread stacks are created, we can adjust the estimated bounds + * of the initial thread's stack appropriately. + * + * This checking is handled in NOMMU_INITIAL_THREAD_BOUNDS(), so see that + * for a few more details. + */ + __pthread_initial_thread_mid = CURRENT_STACK_FRAME; + __pthread_initial_thread_tos = (char *) -1; + __pthread_initial_thread_bos = (char *) 1; /* set it non-zero so we know we have been here */ + PDEBUG("initial thread stack bounds: bos=%p, tos=%p\n", + __pthread_initial_thread_bos, __pthread_initial_thread_tos); +#endif /* __ARCH_USE_MMU__ */ + /* Setup signal handlers for the initial thread. Since signal handlers are shared between threads, these settings will be inherited by all other threads. */ @@ -560,9 +481,9 @@ static void pthread_initialize(void) sigaddset(&sa.sa_mask, __pthread_sig_restart); __libc_sigaction(__pthread_sig_cancel, &sa, NULL); if (__pthread_sig_debug > 0) { - sa.sa_handler = pthread_handle_sigdebug; - __sigemptyset(&sa.sa_mask); - __libc_sigaction(__pthread_sig_debug, &sa, NULL); + sa.sa_handler = pthread_handle_sigdebug; + __sigemptyset(&sa.sa_mask); + __libc_sigaction(__pthread_sig_debug, &sa, NULL); } /* Initially, block __pthread_sig_restart. Will be unblocked on demand. */ __sigemptyset(&mask); @@ -574,48 +495,11 @@ static void pthread_initialize(void) sigprocmask(SIG_UNBLOCK, &mask, NULL); /* Register an exit function to kill all other threads. */ /* Do it early so that user-registered atexit functions are called - before pthread_*exit_process. */ -#ifndef HAVE_Z_NODELETE - if (__builtin_expect (&__dso_handle != NULL, 1)) - __cxa_atexit ((void (*) (void *)) pthread_atexit_process, NULL, - __dso_handle); - else -#endif - __on_exit (pthread_onexit_process, NULL); - /* How many processors. */ - __pthread_smp_kernel = is_smp_system (); - -#if defined SHARED && defined __UCLIBC_HAS_TLS__ - /* Transfer the old value from the dynamic linker's internal location. */ - *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) (); - GL(dl_error_catch_tsd) = &__libc_dl_error_tsd; - - /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock, - keep the lock count from the ld.so implementation. */ - GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock; - GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock; - unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__m_count; - GL(dl_load_lock).mutex.__m_count = 0; - while (rtld_lock_count-- > 0) - __pthread_mutex_lock (&GL(dl_load_lock).mutex); -#endif - -#ifdef __UCLIBC_HAS_TLS__ - GL(dl_init_static_tls) = &__pthread_init_static_tls; -#endif - - /* uClibc-specific stdio initialization for threads. 
*/ - { - FILE *fp; - _stdio_user_locking = 0; /* 2 if threading not initialized */ - for (fp = _stdio_openlist; fp != NULL; fp = fp->__nextopen) { - if (fp->__user_locking != 1) { - fp->__user_locking = 0; - } - } - } + before pthread_onexit_process. */ + on_exit(pthread_onexit_process, NULL); } +void __pthread_initialize(void); void __pthread_initialize(void) { pthread_initialize(); @@ -625,27 +509,11 @@ int __pthread_initialize_manager(void) { int manager_pipe[2]; int pid; - struct pthread_request request; int report_events; - pthread_descr mgr; -#ifdef __UCLIBC_HAS_TLS__ - tcbhead_t *tcbp; -#endif + struct pthread_request request; - __pthread_multiple_threads = 1; -#if TLS_MULTIPLE_THREADS_IN_TCB || !defined __UCLIBC_HAS_TLS__ || !TLS_DTV_AT_TP - __pthread_main_thread->p_multiple_threads = 1; -#endif *__libc_multiple_threads_ptr = 1; -#ifndef HAVE_Z_NODELETE - if (__builtin_expect (&__dso_handle != NULL, 1)) - __cxa_atexit ((void (*) (void *)) pthread_atexit_retcode, NULL, - __dso_handle); -#endif - - if (__pthread_max_stacksize == 0) - __pthread_init_max_stacksize (); /* If basic initialization not done yet (e.g. we're called from a constructor run before our constructor), do it now */ if (__pthread_initial_thread_bos == NULL) pthread_initialize(); @@ -654,59 +522,25 @@ int __pthread_initialize_manager(void) if (__pthread_manager_thread_bos == NULL) return -1; __pthread_manager_thread_tos = __pthread_manager_thread_bos + THREAD_MANAGER_STACK_SIZE; + + /* On non-MMU systems we make sure that the initial thread bounds don't overlap + * with the manager stack frame */ + NOMMU_INITIAL_THREAD_BOUNDS(__pthread_manager_thread_tos,__pthread_manager_thread_bos); + PDEBUG("manager stack: size=%ld, bos=%p, tos=%p\n", THREAD_MANAGER_STACK_SIZE, + __pthread_manager_thread_bos, __pthread_manager_thread_tos); +#if 0 + PDEBUG("initial stack: estimate bos=%p, tos=%p\n", + __pthread_initial_thread_bos, __pthread_initial_thread_tos); +#endif + /* Setup pipe to communicate with thread manager */ if (pipe(manager_pipe) == -1) { free(__pthread_manager_thread_bos); return -1; } - -#ifdef __UCLIBC_HAS_TLS__ - /* Allocate memory for the thread descriptor and the dtv. */ - tcbp = _dl_allocate_tls (NULL); - if (tcbp == NULL) { - free(__pthread_manager_thread_bos); - close_not_cancel(manager_pipe[0]); - close_not_cancel(manager_pipe[1]); - return -1; - } - -# if defined(TLS_TCB_AT_TP) - mgr = (pthread_descr) tcbp; -# elif defined(TLS_DTV_AT_TP) - /* pthread_descr is located right below tcbhead_t which _dl_allocate_tls - returns. */ - mgr = (pthread_descr) ((char *) tcbp - TLS_PRE_TCB_SIZE); -# endif - __pthread_handles[1].h_descr = manager_thread = mgr; - - /* Initialize the descriptor. 
*/ -#if !defined __UCLIBC_HAS_TLS__ || !TLS_DTV_AT_TP - mgr->p_header.data.tcb = tcbp; - mgr->p_header.data.self = mgr; - mgr->p_header.data.multiple_threads = 1; -#elif TLS_MULTIPLE_THREADS_IN_TCB - mgr->p_multiple_threads = 1; -#endif - mgr->p_lock = &__pthread_handles[1].h_lock; -# ifndef __UCLIBC_HAS_TLS__ - mgr->p_errnop = &mgr->p_errno; -# endif - mgr->p_start_args = (struct pthread_start_args) PTHREAD_START_ARGS_INITIALIZER(__pthread_manager); - mgr->p_nr = 1; -# if __LT_SPINLOCK_INIT != 0 - self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER; -# endif - mgr->p_alloca_cutoff = PTHREAD_STACK_MIN / 4; -#else - mgr = &__pthread_manager_thread; -#endif - - __pthread_manager_request = manager_pipe[1]; /* writing end */ - __pthread_manager_reader = manager_pipe[0]; /* reading end */ - /* Start the thread manager */ pid = 0; -#ifdef __UCLIBC_HAS_TLS__ +#if defined(USE_TLS) && USE_TLS if (__linuxthreads_initial_report_events != 0) THREAD_SETMEM (((pthread_descr) NULL), p_report_events, __linuxthreads_initial_report_events); @@ -723,36 +557,25 @@ int __pthread_initialize_manager(void) the manager thread. */ int idx = __td_eventword (TD_CREATE); uint32_t mask = __td_eventmask (TD_CREATE); - uint32_t event_bits; -#ifdef __UCLIBC_HAS_TLS__ - event_bits = THREAD_GETMEM_NC (((pthread_descr) NULL), - p_eventbuf.eventmask.event_bits[idx]); -#else - event_bits = __pthread_initial_thread.p_eventbuf.eventmask.event_bits[idx]; -#endif - - if ((mask & (__pthread_threads_events.event_bits[idx] | event_bits)) + if ((mask & (__pthread_threads_events.event_bits[idx] + | __pthread_initial_thread.p_eventbuf.eventmask.event_bits[idx])) != 0) { - __pthread_lock(mgr->p_lock, NULL); -#ifdef NEED_SEPARATE_REGISTER_STACK + __pthread_lock(__pthread_manager_thread.p_lock, NULL); + +#ifdef __ia64__ pid = __clone2(__pthread_manager_event, - (void **) __pthread_manager_thread_bos, - THREAD_MANAGER_STACK_SIZE, - CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM, - mgr); -#elif defined _STACK_GROWS_UP - pid = __clone(__pthread_manager_event, - (void **) __pthread_manager_thread_bos, - CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM, - mgr); + (void **) __pthread_manager_thread_tos, + THREAD_MANAGER_STACK_SIZE, + CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, + (void *)(long)manager_pipe[0]); #else - pid = __clone(__pthread_manager_event, + pid = clone(__pthread_manager_event, (void **) __pthread_manager_thread_tos, - CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM, - mgr); + CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, + (void *)(long)manager_pipe[0]); #endif if (pid != -1) @@ -761,48 +584,46 @@ int __pthread_initialize_manager(void) the newly created thread's data structure. We cannot let the new thread do this since we don't know whether it was already scheduled when we send the event. */ - mgr->p_eventbuf.eventdata = mgr; - mgr->p_eventbuf.eventnum = TD_CREATE; - __pthread_last_event = mgr; - mgr->p_tid = 2* PTHREAD_THREADS_MAX + 1; - mgr->p_pid = pid; + __pthread_manager_thread.p_eventbuf.eventdata = + &__pthread_manager_thread; + __pthread_manager_thread.p_eventbuf.eventnum = TD_CREATE; + __pthread_last_event = &__pthread_manager_thread; + __pthread_manager_thread.p_tid = 2* PTHREAD_THREADS_MAX + 1; + __pthread_manager_thread.p_pid = pid; /* Now call the function which signals the event. */ __linuxthreads_create_event (); } - /* Now restart the thread. 
*/ - __pthread_unlock(mgr->p_lock); + __pthread_unlock(__pthread_manager_thread.p_lock); } } - if (__builtin_expect (pid, 0) == 0) - { -#ifdef NEED_SEPARATE_REGISTER_STACK - pid = __clone2(__pthread_manager, (void **) __pthread_manager_thread_bos, - THREAD_MANAGER_STACK_SIZE, - CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM, mgr); -#elif defined _STACK_GROWS_UP - pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_bos, - CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM, mgr); + if (pid == 0) { +#ifdef __ia64__ + pid = __clone2(__pthread_manager, (void **) __pthread_manager_thread_tos, + THREAD_MANAGER_STACK_SIZE, + CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, + (void *)(long)manager_pipe[0]); #else - pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_tos, - CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_SYSVSEM, mgr); -#endif - } - if (__builtin_expect (pid, 0) == -1) { -#ifdef __UCLIBC_HAS_TLS__ - _dl_deallocate_tls (tcbp, true); + pid = clone(__pthread_manager, (void **) __pthread_manager_thread_tos, + CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, + (void *)(long)manager_pipe[0]); #endif + } + if (pid == -1) { free(__pthread_manager_thread_bos); - close_not_cancel(manager_pipe[0]); - close_not_cancel(manager_pipe[1]); + close(manager_pipe[0]); + close(manager_pipe[1]); return -1; } - mgr->p_tid = 2* PTHREAD_THREADS_MAX + 1; - mgr->p_pid = pid; + __pthread_manager_request = manager_pipe[1]; /* writing end */ + __pthread_manager_reader = manager_pipe[0]; /* reading end */ + __pthread_manager_thread.p_tid = 2* PTHREAD_THREADS_MAX + 1; + __pthread_manager_thread.p_pid = pid; + /* Make gdb aware of new thread manager */ - if (__builtin_expect (__pthread_threads_debug, 0) && __pthread_sig_debug > 0) + if (__pthread_threads_debug && __pthread_sig_debug > 0) { raise(__pthread_sig_debug); /* We suspend ourself and gdb will wake us up when it is @@ -810,21 +631,21 @@ int __pthread_initialize_manager(void) __pthread_wait_for_restart_signal(thread_self()); } /* Synchronize debugging of the thread manager */ + PDEBUG("send REQ_DEBUG to manager thread\n"); request.req_kind = REQ_DEBUG; - TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request, - (char *) &request, sizeof(request))); + TEMP_FAILURE_RETRY(write(__pthread_manager_request, + (char *) &request, sizeof(request))); return 0; } /* Thread creation */ -int __pthread_create(pthread_t *thread, const pthread_attr_t *attr, +int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void * (*start_routine)(void *), void *arg) { pthread_descr self = thread_self(); struct pthread_request request; - int retval; - if (__builtin_expect (__pthread_manager_request, 0) < 0) { + if (__pthread_manager_request < 0) { if (__pthread_initialize_manager() < 0) return EAGAIN; } request.req_thread = self; @@ -833,35 +654,31 @@ int __pthread_create(pthread_t *thread, const pthread_attr_t *attr, request.req_args.create.fn = start_routine; request.req_args.create.arg = arg; sigprocmask(SIG_SETMASK, NULL, &request.req_args.create.mask); - TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request, - (char *) &request, sizeof(request))); + PDEBUG("write REQ_CREATE to manager thread\n"); + TEMP_FAILURE_RETRY(write(__pthread_manager_request, + (char *) &request, sizeof(request))); + PDEBUG("before suspend(self)\n"); suspend(self); - retval = THREAD_GETMEM(self, p_retcode); - if (__builtin_expect (retval, 0) == 0) + PDEBUG("after suspend(self)\n"); + if 
(THREAD_GETMEM(self, p_retcode) == 0) *thread = (pthread_t) THREAD_GETMEM(self, p_retval); - return retval; + return THREAD_GETMEM(self, p_retcode); } -strong_alias (__pthread_create, pthread_create) /* Simple operations on thread identifiers */ -pthread_descr __pthread_thread_self(void) -{ - return thread_self(); -} - -pthread_t __pthread_self(void) +pthread_t pthread_self(void) { pthread_descr self = thread_self(); return THREAD_GETMEM(self, p_tid); } -strong_alias (__pthread_self, pthread_self) +libpthread_hidden_def (pthread_self) -int __pthread_equal(pthread_t thread1, pthread_t thread2) +int pthread_equal(pthread_t thread1, pthread_t thread2) { return thread1 == thread2; } -strong_alias (__pthread_equal, pthread_equal) +libpthread_hidden_def (pthread_equal) /* Helper function for thread_self in the case of user-provided stacks */ @@ -875,64 +692,55 @@ pthread_descr __pthread_find_self(void) /* __pthread_handles[0] is the initial thread, __pthread_handles[1] is the manager threads handled specially in thread_self(), so start at 2 */ h = __pthread_handles + 2; -# ifdef _STACK_GROWS_UP - while (! (sp >= (char *) h->h_descr && sp < (char *) h->h_descr->p_guardaddr)) h++; -# else while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom)) h++; -# endif + +#ifdef DEBUG_PT + if (h->h_descr == NULL) { + printf("*** %s ERROR descriptor is NULL!!!!! ***\n\n", __FUNCTION__); + _exit(1); + } +#endif + return h->h_descr; } - #else -pthread_descr __pthread_self_stack(void) +static pthread_descr thread_self_stack(void) { - char *sp = CURRENT_STACK_FRAME; - pthread_handle h; - - if (sp >= __pthread_manager_thread_bos && sp < __pthread_manager_thread_tos) - return manager_thread; - h = __pthread_handles + 2; -# ifdef __UCLIBC_HAS_TLS__ -# ifdef _STACK_GROWS_UP - while (h->h_descr == NULL - || ! (sp >= h->h_descr->p_stackaddr && sp < h->h_descr->p_guardaddr)) - h++; -# else - while (h->h_descr == NULL - || ! (sp <= (char *) h->h_descr->p_stackaddr && sp >= h->h_bottom)) - h++; -# endif + char *sp = CURRENT_STACK_FRAME; + pthread_handle h; + + if (sp >= __pthread_manager_thread_bos && sp < __pthread_manager_thread_tos) + return manager_thread; + h = __pthread_handles + 2; +# if defined(USE_TLS) && USE_TLS + while (h->h_descr == NULL + || ! (sp <= (char *) h->h_descr->p_stackaddr && sp >= h->h_bottom)) + h++; # else -# ifdef _STACK_GROWS_UP - while (! (sp >= (char *) h->h_descr && sp < h->h_descr->p_guardaddr)) - h++; -# else - while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom)) - h++; -# endif + while (! 
(sp <= (char *) h->h_descr && sp >= h->h_bottom)) + h++; # endif - return h->h_descr; + return h->h_descr; } #endif /* Thread scheduling */ -int __pthread_setschedparam(pthread_t thread, int policy, - const struct sched_param *param) +int pthread_setschedparam(pthread_t thread, int policy, + const struct sched_param *param) { pthread_handle handle = thread_handle(thread); pthread_descr th; __pthread_lock(&handle->h_lock, NULL); - if (__builtin_expect (invalid_handle(handle, thread), 0)) { + if (invalid_handle(handle, thread)) { __pthread_unlock(&handle->h_lock); return ESRCH; } th = handle->h_descr; - if (__builtin_expect (__sched_setscheduler(th->p_pid, policy, param) == -1, - 0)) { + if (sched_setscheduler(th->p_pid, policy, param) == -1) { __pthread_unlock(&handle->h_lock); return errno; } @@ -942,85 +750,53 @@ int __pthread_setschedparam(pthread_t thread, int policy, __pthread_manager_adjust_prio(th->p_priority); return 0; } -strong_alias (__pthread_setschedparam, pthread_setschedparam) +libpthread_hidden_def(pthread_setschedparam) -int __pthread_getschedparam(pthread_t thread, int *policy, - struct sched_param *param) +int pthread_getschedparam(pthread_t thread, int *policy, + struct sched_param *param) { pthread_handle handle = thread_handle(thread); int pid, pol; __pthread_lock(&handle->h_lock, NULL); - if (__builtin_expect (invalid_handle(handle, thread), 0)) { + if (invalid_handle(handle, thread)) { __pthread_unlock(&handle->h_lock); return ESRCH; } pid = handle->h_descr->p_pid; __pthread_unlock(&handle->h_lock); - pol = __sched_getscheduler(pid); - if (__builtin_expect (pol, 0) == -1) return errno; - if (__sched_getparam(pid, param) == -1) return errno; + pol = sched_getscheduler(pid); + if (pol == -1) return errno; + if (sched_getparam(pid, param) == -1) return errno; *policy = pol; return 0; } -strong_alias (__pthread_getschedparam, pthread_getschedparam) +libpthread_hidden_def(pthread_getschedparam) /* Process-wide exit() request */ -static void pthread_onexit_process(int retcode, void *arg) +static void pthread_onexit_process(int retcode, void *arg attribute_unused) { - if (__builtin_expect (__pthread_manager_request, 0) >= 0) { struct pthread_request request; pthread_descr self = thread_self(); - /* Make sure we come back here after suspend(), in case we entered - from a signal handler. */ - THREAD_SETMEM(self, p_signal_jmp, NULL); - - request.req_thread = self; - request.req_kind = REQ_PROCESS_EXIT; - request.req_args.exit.code = retcode; - TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request, - (char *) &request, sizeof(request))); - suspend(self); - /* Main thread should accumulate times for thread manager and its - children, so that timings for main thread account for all threads. */ - if (self == __pthread_main_thread) - { -#ifdef __UCLIBC_HAS_TLS__ - waitpid(manager_thread->p_pid, NULL, __WCLONE); -#else - waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE); -#endif - /* Since all threads have been asynchronously terminated - (possibly holding locks), free cannot be used any more. - For mtrace, we'd like to print something though. 
*/ - /* #ifdef __UCLIBC_HAS_TLS__ - tcbhead_t *tcbp = (tcbhead_t *) manager_thread; - # if defined(TLS_DTV_AT_TP) - tcbp = (tcbhead_t) ((char *) tcbp + TLS_PRE_TCB_SIZE); - # endif - _dl_deallocate_tls (tcbp, true); - #endif - free (__pthread_manager_thread_bos); */ - __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL; - } - } -} - -#ifndef HAVE_Z_NODELETE -static int __pthread_atexit_retcode; - -static void pthread_atexit_process(void *arg, int retcode) -{ - pthread_onexit_process (retcode ?: __pthread_atexit_retcode, arg); -} - -static void pthread_atexit_retcode(void *arg, int retcode) -{ - __pthread_atexit_retcode = retcode; + if (__pthread_manager_request >= 0) { + request.req_thread = self; + request.req_kind = REQ_PROCESS_EXIT; + request.req_args.exit.code = retcode; + TEMP_FAILURE_RETRY(write(__pthread_manager_request, + (char *) &request, sizeof(request))); + suspend(self); + /* Main thread should accumulate times for thread manager and its + children, so that timings for main thread account for all threads. */ + if (self == __pthread_main_thread) { + waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE); + /* Since all threads have been asynchronously terminated + * (possibly holding locks), free cannot be used any more. */ + __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL; + } + } } -#endif /* The handler for the RESTART signal just records the signal received in the thread descriptor, and optionally performs a siglongjmp @@ -1028,10 +804,10 @@ static void pthread_atexit_retcode(void *arg, int retcode) static void pthread_handle_sigrestart(int sig) { - pthread_descr self = check_thread_self(); - THREAD_SETMEM(self, p_signal, sig); - if (THREAD_GETMEM(self, p_signal_jmp) != NULL) - siglongjmp(*THREAD_GETMEM(self, p_signal_jmp), 1); + pthread_descr self = thread_self(); + THREAD_SETMEM(self, p_signal, sig); + if (THREAD_GETMEM(self, p_signal_jmp) != NULL) + siglongjmp(*THREAD_GETMEM(self, p_signal_jmp), 1); } /* The handler for the CANCEL signal checks for cancellation @@ -1041,20 +817,39 @@ static void pthread_handle_sigrestart(int sig) static void pthread_handle_sigcancel(int sig) { - pthread_descr self = check_thread_self(); + pthread_descr self = thread_self(); sigjmp_buf * jmpbuf; - if (self == manager_thread) + + if (self == &__pthread_manager_thread) { +#ifdef THREAD_SELF + /* A new thread might get a cancel signal before it is fully + initialized, so that the thread register might still point to the + manager thread. Double check that this is really the manager + thread. */ + pthread_descr real_self = thread_self_stack(); + if (real_self == &__pthread_manager_thread) + { + __pthread_manager_sighandler(sig); + return; + } + /* Oops, thread_self() isn't working yet.. */ + self = real_self; +# ifdef INIT_THREAD_SELF + INIT_THREAD_SELF(self, self->p_nr); +# endif +#else __pthread_manager_sighandler(sig); return; +#endif } if (__builtin_expect (__pthread_exit_requested, 0)) { /* Main thread should accumulate times for thread manager and its children, so that timings for main thread account for all threads. */ if (self == __pthread_main_thread) { -#ifdef __UCLIBC_HAS_TLS__ - waitpid(manager_thread->p_pid, NULL, __WCLONE); +#if defined(USE_TLS) && USE_TLS + waitpid(__pthread_manager_thread->p_pid, NULL, __WCLONE); #else waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE); #endif @@ -1083,7 +878,7 @@ static void pthread_handle_sigcancel(int sig) know what it is specifically done for. 
In the current implementation, the thread manager simply discards it. */ -static void pthread_handle_sigdebug(int sig) +static void pthread_handle_sigdebug(int sig attribute_unused) { /* Nothing */ } @@ -1103,39 +898,20 @@ void __pthread_reset_main_thread(void) free(__pthread_manager_thread_bos); __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL; /* Close the two ends of the pipe */ - close_not_cancel(__pthread_manager_request); - close_not_cancel(__pthread_manager_reader); + close(__pthread_manager_request); + close(__pthread_manager_reader); __pthread_manager_request = __pthread_manager_reader = -1; } /* Update the pid of the main thread */ - THREAD_SETMEM(self, p_pid, __getpid()); + THREAD_SETMEM(self, p_pid, getpid()); /* Make the forked thread the main thread */ __pthread_main_thread = self; THREAD_SETMEM(self, p_nextlive, self); THREAD_SETMEM(self, p_prevlive, self); -#ifndef __UCLIBC_HAS_TLS__ /* Now this thread modifies the global variables. */ THREAD_SETMEM(self, p_errnop, &_errno); THREAD_SETMEM(self, p_h_errnop, &_h_errno); -# if defined __UCLIBC_HAS_RESOLVER_SUPPORT__ - THREAD_SETMEM(self, p_resp, __resp); -# endif -#endif - -#ifndef FLOATING_STACKS - /* This is to undo the setrlimit call in __pthread_init_max_stacksize. - XXX This can be wrong if the user set the limit during the run. */ - { - struct rlimit limit; - if (getrlimit (RLIMIT_STACK, &limit) == 0 - && limit.rlim_cur != limit.rlim_max) - { - limit.rlim_cur = limit.rlim_max; - setrlimit(RLIMIT_STACK, &limit); - } - } -#endif } /* Process-wide exec() request */ @@ -1148,7 +924,6 @@ void __pthread_kill_other_threads_np(void) /* Make current thread the main thread in case the calling thread changes its mind, does not exec(), and creates new threads instead. */ __pthread_reset_main_thread(); - /* Reset the signal handlers behaviour for the signals the implementation uses since this would be passed to the new process. */ @@ -1179,24 +954,24 @@ int __pthread_getconcurrency(void) } weak_alias (__pthread_getconcurrency, pthread_getconcurrency) + /* Primitives for controlling thread execution */ void __pthread_wait_for_restart_signal(pthread_descr self) { - sigset_t mask; + sigset_t mask; - sigprocmask(SIG_SETMASK, NULL, &mask); /* Get current signal mask */ - sigdelset(&mask, __pthread_sig_restart); /* Unblock the restart signal */ - THREAD_SETMEM(self, p_signal, 0); - do { - __pthread_sigsuspend(&mask); /* Wait for signal. Must not be a - cancellation point. */ - } while (THREAD_GETMEM(self, p_signal) !=__pthread_sig_restart); + sigprocmask(SIG_SETMASK, NULL, &mask); /* Get current signal mask */ + sigdelset(&mask, __pthread_sig_restart); /* Unblock the restart signal */ + THREAD_SETMEM(self, p_signal, 0); + do { + sigsuspend(&mask); /* Wait for signal */ + } while (THREAD_GETMEM(self, p_signal) !=__pthread_sig_restart); - READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */ + READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */ } -#if !__ASSUME_REALTIME_SIGNALS +#ifndef __NR_rt_sigaction /* The _old variants are for 2.0 and early 2.1 kernels which don't have RT signals. On these kernels, we use SIGUSR1 and SIGUSR2 for restart and cancellation. 
@@ -1206,14 +981,14 @@ void __pthread_wait_for_restart_signal(pthread_descr self) void __pthread_restart_old(pthread_descr th) { - if (pthread_atomic_increment(&th->p_resume_count) == -1) - kill(th->p_pid, __pthread_sig_restart); + if (atomic_increment(&th->p_resume_count) == -1) + kill(th->p_pid, __pthread_sig_restart); } void __pthread_suspend_old(pthread_descr self) { - if (pthread_atomic_decrement(&self->p_resume_count) <= 0) - __pthread_wait_for_restart_signal(self); + if (atomic_decrement(&self->p_resume_count) <= 0) + __pthread_wait_for_restart_signal(self); } int @@ -1223,7 +998,7 @@ __pthread_timedsuspend_old(pthread_descr self, const struct timespec *abstime) int was_signalled = 0; sigjmp_buf jmpbuf; - if (pthread_atomic_decrement(&self->p_resume_count) == 0) { + if (atomic_decrement(&self->p_resume_count) == 0) { /* Set up a longjmp handler for the restart signal, unblock the signal and sleep. */ @@ -1240,7 +1015,7 @@ __pthread_timedsuspend_old(pthread_descr self, const struct timespec *abstime) struct timespec reltime; /* Compute a time offset relative to now. */ - __gettimeofday (&now, NULL); + gettimeofday (&now, NULL); reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000; reltime.tv_sec = abstime->tv_sec - now.tv_sec; if (reltime.tv_nsec < 0) { @@ -1280,9 +1055,9 @@ __pthread_timedsuspend_old(pthread_descr self, const struct timespec *abstime) being delivered. */ if (!was_signalled) { - if (pthread_atomic_increment(&self->p_resume_count) != -1) { + if (atomic_increment(&self->p_resume_count) != -1) { __pthread_wait_for_restart_signal(self); - pthread_atomic_decrement(&self->p_resume_count); /* should be zero now! */ + atomic_decrement(&self->p_resume_count); /* should be zero now! */ /* woke spontaneously and consumed restart signal */ return 1; } @@ -1292,92 +1067,99 @@ __pthread_timedsuspend_old(pthread_descr self, const struct timespec *abstime) /* woken due to restart signal */ return 1; } -#endif /* __ASSUME_REALTIME_SIGNALS */ +#endif /* __NR_rt_sigaction */ + +#ifdef __NR_rt_sigaction void __pthread_restart_new(pthread_descr th) { - /* The barrier is proabably not needed, in which case it still documents - our assumptions. The intent is to commit previous writes to shared - memory so the woken thread will have a consistent view. Complementary - read barriers are present to the suspend functions. */ - WRITE_MEMORY_BARRIER(); - kill(th->p_pid, __pthread_sig_restart); + /* The barrier is proabably not needed, in which case it still documents + our assumptions. The intent is to commit previous writes to shared + memory so the woken thread will have a consistent view. Complementary + read barriers are present to the suspend functions. 
*/ + WRITE_MEMORY_BARRIER(); + kill(th->p_pid, __pthread_sig_restart); } -/* There is no __pthread_suspend_new because it would just - be a wasteful wrapper for __pthread_wait_for_restart_signal */ - -int -__pthread_timedsuspend_new(pthread_descr self, const struct timespec *abstime) +int __pthread_timedsuspend_new(pthread_descr self, const struct timespec *abstime) { - sigset_t unblock, initial_mask; - int was_signalled = 0; - sigjmp_buf jmpbuf; + sigset_t unblock, initial_mask; + int was_signalled = 0; + sigjmp_buf jmpbuf; - if (sigsetjmp(jmpbuf, 1) == 0) { - THREAD_SETMEM(self, p_signal_jmp, &jmpbuf); - THREAD_SETMEM(self, p_signal, 0); - /* Unblock the restart signal */ - __sigemptyset(&unblock); - sigaddset(&unblock, __pthread_sig_restart); - sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask); - - while (1) { - struct timeval now; - struct timespec reltime; - - /* Compute a time offset relative to now. */ - __gettimeofday (&now, NULL); - reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000; - reltime.tv_sec = abstime->tv_sec - now.tv_sec; - if (reltime.tv_nsec < 0) { - reltime.tv_nsec += 1000000000; - reltime.tv_sec -= 1; - } + if (sigsetjmp(jmpbuf, 1) == 0) { + THREAD_SETMEM(self, p_signal_jmp, &jmpbuf); + THREAD_SETMEM(self, p_signal, 0); + /* Unblock the restart signal */ + __sigemptyset(&unblock); + sigaddset(&unblock, __pthread_sig_restart); + sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask); + + while (1) { + struct timeval now; + struct timespec reltime; + + /* Compute a time offset relative to now. */ + gettimeofday (&now, NULL); + reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000; + reltime.tv_sec = abstime->tv_sec - now.tv_sec; + if (reltime.tv_nsec < 0) { + reltime.tv_nsec += 1000000000; + reltime.tv_sec -= 1; + } - /* Sleep for the required duration. If woken by a signal, - resume waiting as required by Single Unix Specification. */ - if (reltime.tv_sec < 0 || nanosleep(&reltime, NULL) == 0) - break; - } + /* Sleep for the required duration. If woken by a signal, + resume waiting as required by Single Unix Specification. */ + if (reltime.tv_sec < 0 || nanosleep(&reltime, NULL) == 0) + break; + } - /* Block the restart signal again */ - sigprocmask(SIG_SETMASK, &initial_mask, NULL); - was_signalled = 0; - } else { - was_signalled = 1; - } - THREAD_SETMEM(self, p_signal_jmp, NULL); + /* Block the restart signal again */ + sigprocmask(SIG_SETMASK, &initial_mask, NULL); + was_signalled = 0; + } else { + was_signalled = 1; + } + THREAD_SETMEM(self, p_signal_jmp, NULL); - /* Now was_signalled is true if we exited the above code - due to the delivery of a restart signal. In that case, - everything is cool. We have been removed from whatever - we were waiting on by the other thread, and consumed its signal. + /* Now was_signalled is true if we exited the above code + due to the delivery of a restart signal. In that case, + everything is cool. We have been removed from whatever + we were waiting on by the other thread, and consumed its signal. - Otherwise we this thread woke up spontaneously, or due to a signal other - than restart. This is an ambiguous case that must be resolved by - the caller; the thread is still eligible for a restart wakeup - so there is a race. */ + Otherwise we this thread woke up spontaneously, or due to a signal other + than restart. This is an ambiguous case that must be resolved by + the caller; the thread is still eligible for a restart wakeup + so there is a race. 
*/ - READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */ - return was_signalled; + READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */ + return was_signalled; } - +#endif /* Debugging aid */ -#ifdef DEBUG +#ifdef DEBUG_PT #include <stdarg.h> -void __pthread_message(const char * fmt, ...) +void __pthread_message(char * fmt, ...) { char buffer[1024]; va_list args; - sprintf(buffer, "%05d : ", __getpid()); + sprintf(buffer, "%05d : ", getpid()); va_start(args, fmt); vsnprintf(buffer + 8, sizeof(buffer) - 8, fmt, args); va_end(args); - TEMP_FAILURE_RETRY(write_not_cancel(2, buffer, strlen(buffer))); + TEMP_FAILURE_RETRY(write(2, buffer, strlen(buffer))); } #endif + + +#ifndef __PIC__ +/* We need a hook to force the cancellation wrappers to be linked in when + static libpthread is used. */ +extern const char __pthread_provide_wrappers; +static const char *const __pthread_require_wrappers = + &__pthread_provide_wrappers; +#endif |