Diffstat (limited to 'libpthread/nptl/pthread_create.c')
-rw-r--r-- | libpthread/nptl/pthread_create.c | 122
1 file changed, 112 insertions(+), 10 deletions(-)
diff --git a/libpthread/nptl/pthread_create.c b/libpthread/nptl/pthread_create.c
index 903e28dc6..a61cd715d 100644
--- a/libpthread/nptl/pthread_create.c
+++ b/libpthread/nptl/pthread_create.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2007,2008,2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -27,6 +27,7 @@
 #include <atomic.h>
 #include <libc-internal.h>
 #include <resolv.h>
+#include <bits/kernel-features.h>
 
 /* Local function to start thread and handle cleanup.  */
@@ -37,10 +38,10 @@ static int start_thread (void *arg);
 int __pthread_debug;
 
 /* Globally enabled events.  */
-static td_thr_events_t __nptl_threads_events;
+static td_thr_events_t __nptl_threads_events __attribute_used__;
 
 /* Pointer to descriptor with the last event.  */
-static struct pthread *__nptl_last_event;
+static struct pthread *__nptl_last_event __attribute_used__;
 
 /* Number of threads running.  */
 unsigned int __nptl_nthreads = 1;
@@ -50,17 +51,18 @@ unsigned int __nptl_nthreads = 1;
 #include "allocatestack.c"
 
 /* Code to create the thread.  */
-#include "createthread.c"
+#include <createthread.c>
 
 
 struct pthread *
 internal_function
-__find_in_stack_list (struct pthread *pd)
+__find_in_stack_list (
+     struct pthread *pd)
 {
   list_t *entry;
   struct pthread *result = NULL;
 
-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);
 
   list_for_each (entry, &stack_used)
     {
@@ -87,7 +89,7 @@ __find_in_stack_list (struct pthread *pd)
 	  }
       }
 
-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);
 
   return result;
 }
@@ -203,6 +205,15 @@ __free_tcb (struct pthread *pd)
 	 running thread is gone.  */
       abort ();
 
+      /* Free TPP data.  */
+      if (__builtin_expect (pd->tpp != NULL, 0))
+	{
+	  struct priority_protection_data *tpp = pd->tpp;
+
+	  pd->tpp = NULL;
+	  free (tpp);
+	}
+
       /* Queue the stack memory block for reuse and exit the process.  The
 	 kernel will signal via writing to the address returned by
 	 QUEUE-STACK when the stack is available.  */
@@ -226,6 +237,32 @@ start_thread (void *arg)
   /* Initialize resolver state pointer.  */
   __resp = &pd->res;
 
+#ifdef __NR_set_robust_list
+# ifndef __ASSUME_SET_ROBUST_LIST
+  if (__set_robust_list_avail >= 0)
+# endif
+    {
+      INTERNAL_SYSCALL_DECL (err);
+      /* This call should never fail because the initial call in init.c
+	 succeeded.  */
+      INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
+			sizeof (struct robust_list_head));
+    }
+#endif
+
+  /* If the parent was running cancellation handlers while creating
+     the thread the new thread inherited the signal mask.  Reset the
+     cancellation signal mask.  */
+  if (__builtin_expect (pd->parent_cancelhandling & CANCELING_BITMASK, 0))
+    {
+      INTERNAL_SYSCALL_DECL (err);
+      sigset_t mask;
+      __sigemptyset (&mask);
+      __sigaddset (&mask, SIGCANCEL);
+      (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &mask,
+			       NULL, _NSIG / 8);
+    }
+
   /* This is where the try/finally block should be created.  For
      compilers without that support we do use setjmp.  */
   struct pthread_unwind_buf unwind_buf;
@@ -246,9 +283,9 @@ start_thread (void *arg)
 	  int oldtype = CANCEL_ASYNC ();
 
 	  /* Get the lock the parent locked to force synchronization.  */
-	  lll_lock (pd->lock);
+	  lll_lock (pd->lock, LLL_PRIVATE);
 	  /* And give it up right away.  */
-	  lll_unlock (pd->lock);
+	  lll_unlock (pd->lock, LLL_PRIVATE);
 
 	  CANCEL_RESET (oldtype);
 	}
@@ -264,6 +301,9 @@ start_thread (void *arg)
   /* Run the destructor for the thread-local data.  */
   __nptl_deallocate_tsd ();
 
+  /* Clean up any state libc stored in thread-local variables.  */
+  __libc_thread_freeres ();
+
   /* If this is the last thread we terminate the process now.  We
      do not notify the debugger, it might just irritate it if there
      is no thread left.  */
@@ -304,10 +344,65 @@ start_thread (void *arg)
      the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
   atomic_bit_set (&pd->cancelhandling, EXITING_BIT);
 
+#ifndef __ASSUME_SET_ROBUST_LIST
+  /* If this thread has any robust mutexes locked, handle them now.  */
+# if __WORDSIZE == 64
+  void *robust = pd->robust_head.list;
+# else
+  __pthread_slist_t *robust = pd->robust_list.__next;
+# endif
+  /* We let the kernel do the notification if it is able to do so.
+     If we have to do it here there for sure are no PI mutexes involved
+     since the kernel support for them is even more recent.  */
+  if (__set_robust_list_avail < 0
+      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
+    {
+      do
+	{
+	  struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
+	    ((char *) robust - offsetof (struct __pthread_mutex_s,
+					 __list.__next));
+	  robust = *((void **) robust);
+
+# ifdef __PTHREAD_MUTEX_HAVE_PREV
+	  this->__list.__prev = NULL;
+# endif
+	  this->__list.__next = NULL;
+
+	  lll_robust_dead (this->__lock, /* XYZ */ LLL_SHARED);
+	}
+      while (robust != (void *) &pd->robust_head);
+    }
+#endif
+
+  /* Mark the memory of the stack as usable to the kernel.  We free
+     everything except for the space used for the TCB itself.  */
+  size_t pagesize_m1 = __getpagesize () - 1;
+#ifdef _STACK_GROWS_DOWN
+  char *sp = CURRENT_STACK_FRAME;
+  size_t freesize = (sp - (char *) pd->stackblock) & ~pagesize_m1;
+#else
+# error "to do"
+#endif
+  assert (freesize < pd->stackblock_size);
+  if (freesize > PTHREAD_STACK_MIN)
+    madvise (pd->stackblock, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);
+
   /* If the thread is detached free the TCB.  */
   if (IS_DETACHED (pd))
     /* Free the TCB.  */
     __free_tcb (pd);
+  else if (__builtin_expect (pd->cancelhandling & SETXID_BITMASK, 0))
+    {
+      /* Some other thread might call any of the setXid functions and expect
+	 us to reply.  In this case wait until we did that.  */
+      do
+	lll_futex_wait (&pd->setxid_futex, 0, LLL_PRIVATE);
+      while (pd->cancelhandling & SETXID_BITMASK);
+
+      /* Reset the value so that the stack can be reused.  */
+      pd->setxid_futex = 0;
+    }
 
   /* We cannot call '_exit' here.  '_exit' will terminate the process.
@@ -348,7 +443,7 @@ __pthread_create_2_1 (
      accessing far-away memory.  */
   iattr = &default_attr;
 
-  struct pthread *pd = 0;
+  struct pthread *pd = NULL;
   int err = ALLOCATE_STACK (iattr, &pd);
   if (__builtin_expect (err != 0, 0))
     /* Something went wrong.  Maybe a parameter of the attributes is
@@ -398,6 +493,11 @@ __pthread_create_2_1 (
   THREAD_COPY_STACK_GUARD (pd);
 #endif
 
+  /* Copy the pointer guard value.  */
+#ifdef THREAD_COPY_POINTER_GUARD
+  THREAD_COPY_POINTER_GUARD (pd);
+#endif
+
   /* Determine scheduling parameters for the thread.  */
   if (attr != NULL
       && __builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
@@ -468,12 +568,14 @@ weak_alias(__pthread_create_2_1, pthread_create)
 /* If pthread_create is present, libgcc_eh.a and libsupc++.a expects some
    other POSIX thread functions to be present as well.  */
 PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_lock)
+PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_trylock)
 PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_unlock)
 
 PTHREAD_STATIC_FN_REQUIRE (pthread_once)
 PTHREAD_STATIC_FN_REQUIRE (pthread_cancel)
 
 PTHREAD_STATIC_FN_REQUIRE (pthread_key_create)
+PTHREAD_STATIC_FN_REQUIRE (pthread_key_delete)
 PTHREAD_STATIC_FN_REQUIRE (pthread_setspecific)
 PTHREAD_STATIC_FN_REQUIRE (pthread_getspecific)
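
The set_robust_list registration and the fallback walk over pd->robust_head added in this patch are what make robust mutexes usable from applications: when a thread dies while owning such a mutex, either the kernel or the thread-exit path marks the lock dead so the next locker is told about it. The following user-level sketch shows that behaviour; it is not part of this patch and uses the POSIX.1-2008 names (pthread_mutexattr_setrobust, pthread_mutex_consistent, PTHREAD_MUTEX_ROBUST), which on NPTL builds of this vintage may only be available as the older *_np variants.

#include <pthread.h>
#include <errno.h>
#include <stdio.h>

static pthread_mutex_t m;

static void *
die_with_lock (void *arg)
{
  pthread_mutex_lock (&m);
  return NULL;			/* Exit while still owning the mutex.  */
}

int
main (void)
{
  pthread_mutexattr_t a;
  pthread_mutexattr_init (&a);
  pthread_mutexattr_setrobust (&a, PTHREAD_MUTEX_ROBUST);
  pthread_mutex_init (&m, &a);

  pthread_t t;
  pthread_create (&t, NULL, die_with_lock, NULL);
  pthread_join (t, NULL);

  /* The owner exited without unlocking: a robust mutex reports this
     instead of leaving us to deadlock.  */
  if (pthread_mutex_lock (&m) == EOWNERDEAD)
    {
      puts ("previous owner died; recovering state");
      pthread_mutex_consistent (&m);
    }
  pthread_mutex_unlock (&m);
  return 0;
}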
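The new stack-trimming code in start_thread measures how much of the thread's stack block lies below the live frame, rounds that down to a page boundary, keeps PTHREAD_STACK_MIN bytes of headroom, and returns the rest to the kernel with madvise(MADV_DONTNEED). A small standalone sketch of the same arithmetic on made-up numbers follows; stack_base, sp, page_size and stack_min are stand-ins for pd->stackblock, CURRENT_STACK_FRAME, the page size and PTHREAD_STACK_MIN, not NPTL symbols.

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uintptr_t stack_base = 0x40000000;	/* stand-in for pd->stackblock */
  uintptr_t sp         = 0x4001f2a0;	/* stand-in for CURRENT_STACK_FRAME */
  size_t page_size     = 4096;
  size_t stack_min     = 16384;		/* PTHREAD_STACK_MIN on many ports */

  size_t pagesize_m1 = page_size - 1;
  /* Bytes between the block base and the live frame, rounded down to a
     whole page (stack grows downward, so this region is unused).  */
  size_t freesize = (sp - stack_base) & ~pagesize_m1;	/* 0x1f000 = 126976 */

  if (freesize > stack_min)
    /* The real code would madvise(MADV_DONTNEED) this many bytes starting
       at the block base.  */
    printf ("would release %zu bytes\n", freesize - stack_min);	/* 110592 */
  return 0;
}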