author     Austin Foxley <austinf@cetoncorp.com>   2009-10-17 12:26:24 -0700
committer  Austin Foxley <austinf@cetoncorp.com>   2009-10-17 12:26:24 -0700
commit     c68d0fa2d88fc2134a38d99e7e944828384a7671 (patch)
tree       6596943bd1c77f18d6e49d4153ddd3f3d67b49f5 /libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486
parent     9a03e98a3b418f33c347a6023e9320f3a42cb9e4 (diff)
libpthread/nptl: core of the "Native Posix Threading Library" for uClibc,
targeting arm, sh, i386, mips, and sparc for now.

Signed-off-by: Austin Foxley <austinf@cetoncorp.com>
Diffstat (limited to 'libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486')
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S          |  30
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S               | 283
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S       | 162
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S     | 161
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S        | 110
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S     | 466
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S          | 358
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S      | 175
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S | 214
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S | 207
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S      | 140
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S      | 165
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_post.S                   |  96
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S              | 193
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S                |  85
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S                   | 134
16 files changed, 2979 insertions, 0 deletions
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S
new file mode 100644
index 000000000..223b11108
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S
@@ -0,0 +1,30 @@
+/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+/* In libc.so we do not use the lock prefix unconditionally, but only
+   once the application has created additional threads.  */
+#ifndef UP
+# define LOCK \
+ cmpl $0, %gs:MULTIPLE_THREADS_OFFSET; \
+ je,pt 0f; \
+ lock; \
+0:
+#endif
+
+#include "lowlevellock.S"
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S
new file mode 100644
index 000000000..955e119ab
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S
@@ -0,0 +1,283 @@
+/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <pthread-errnos.h>
+
+ .text
+
+#ifndef LOCK
+# ifdef UP
+# define LOCK
+# else
+# define LOCK lock
+# endif
+#endif
+
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+
+
+ .globl __lll_mutex_lock_wait
+ .type __lll_mutex_lock_wait,@function
+ .hidden __lll_mutex_lock_wait
+ .align 16
+__lll_mutex_lock_wait:
+ pushl %edx
+ pushl %ebx
+ pushl %esi
+
+ movl $2, %edx
+ movl %ecx, %ebx
+ xorl %esi, %esi /* No timeout. */
+ xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
+
+ cmpl %edx, %eax /* NB: %edx == 2 */
+ jne 2f
+
+1: movl $SYS_futex, %eax
+ ENTER_KERNEL
+
+2: movl %edx, %eax
+ xchgl %eax, (%ebx) /* NB: lock is implied */
+
+ testl %eax, %eax
+ jnz,pn 1b
+
+ popl %esi
+ popl %ebx
+ popl %edx
+ ret
+ .size __lll_mutex_lock_wait,.-__lll_mutex_lock_wait
+
+
+#ifdef NOT_IN_libc
+ .globl __lll_mutex_timedlock_wait
+ .type __lll_mutex_timedlock_wait,@function
+ .hidden __lll_mutex_timedlock_wait
+ .align 16
+__lll_mutex_timedlock_wait:
+ /* Check for a valid timeout value. */
+ cmpl $1000000000, 4(%edx)
+ jae 3f
+
+ pushl %edi
+ pushl %esi
+ pushl %ebx
+ pushl %ebp
+
+ /* Stack frame for the timespec and timeval structs. */
+ subl $8, %esp
+
+ movl %ecx, %ebp
+ movl %edx, %edi
+
+1:
+ /* Get current time. */
+ movl %esp, %ebx
+ xorl %ecx, %ecx
+ movl $SYS_gettimeofday, %eax
+ ENTER_KERNEL
+
+ /* Compute relative timeout. */
+ movl 4(%esp), %eax
+ movl $1000, %edx
+ mul %edx /* Microseconds to nanoseconds. */
+ movl (%edi), %ecx
+ movl 4(%edi), %edx
+ subl (%esp), %ecx
+ subl %eax, %edx
+ jns 4f
+ addl $1000000000, %edx
+ subl $1, %ecx
+4: testl %ecx, %ecx
+ js 5f /* Time is already up. */
+
+ /* Store relative timeout. */
+ movl %ecx, (%esp)
+ movl %edx, 4(%esp)
+
+ movl %ebp, %ebx
+
+ movl $1, %eax
+ movl $2, %edx
+ LOCK
+ cmpxchgl %edx, (%ebx)
+
+ testl %eax, %eax
+ je 8f
+
+ /* Futex call. */
+ movl %esp, %esi
+ xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
+ movl $SYS_futex, %eax
+ ENTER_KERNEL
+ movl %eax, %ecx
+
+8: /* NB: %edx == 2 */
+ xorl %eax, %eax
+ LOCK
+ cmpxchgl %edx, (%ebx)
+
+ jnz 7f
+
+6: addl $8, %esp
+ popl %ebp
+ popl %ebx
+ popl %esi
+ popl %edi
+ ret
+
+ /* Check whether the time expired. */
+7: cmpl $-ETIMEDOUT, %ecx
+ je 5f
+
+ /* Make sure the current holder knows we are going to sleep. */
+ movl %edx, %eax
+ xchgl %eax, (%ebx)
+ testl %eax, %eax
+ jz 6b
+ jmp 1b
+
+3: movl $EINVAL, %eax
+ ret
+
+5: movl $ETIMEDOUT, %eax
+ jmp 6b
+ .size __lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait
+#endif
+
+
+#ifdef NOT_IN_libc
+ .globl lll_unlock_wake_cb
+ .type lll_unlock_wake_cb,@function
+ .hidden lll_unlock_wake_cb
+ .align 16
+lll_unlock_wake_cb:
+ pushl %ebx
+ pushl %ecx
+ pushl %edx
+
+ movl 20(%esp), %ebx
+ LOCK
+ subl $1, (%ebx)
+ je 1f
+
+ movl $FUTEX_WAKE, %ecx
+ movl $1, %edx /* Wake one thread. */
+ movl $SYS_futex, %eax
+ movl $0, (%ebx)
+ ENTER_KERNEL
+
+1: popl %edx
+ popl %ecx
+ popl %ebx
+ ret
+ .size lll_unlock_wake_cb,.-lll_unlock_wake_cb
+#endif
+
+
+ .globl __lll_mutex_unlock_wake
+ .type __lll_mutex_unlock_wake,@function
+ .hidden __lll_mutex_unlock_wake
+ .align 16
+__lll_mutex_unlock_wake:
+ pushl %ebx
+ pushl %ecx
+ pushl %edx
+
+ movl %eax, %ebx
+ movl $0, (%eax)
+ movl $FUTEX_WAKE, %ecx
+ movl $1, %edx /* Wake one thread. */
+ movl $SYS_futex, %eax
+ ENTER_KERNEL
+
+ popl %edx
+ popl %ecx
+ popl %ebx
+ ret
+ .size __lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake
+
+
+#ifdef NOT_IN_libc
+ .globl __lll_timedwait_tid
+ .type __lll_timedwait_tid,@function
+ .hidden __lll_timedwait_tid
+ .align 16
+__lll_timedwait_tid:
+ pushl %edi
+ pushl %esi
+ pushl %ebx
+ pushl %ebp
+
+ movl %eax, %ebp
+ movl %edx, %edi
+ subl $8, %esp
+
+ /* Get current time. */
+2: movl %esp, %ebx
+ xorl %ecx, %ecx
+ movl $SYS_gettimeofday, %eax
+ ENTER_KERNEL
+
+ /* Compute relative timeout. */
+ movl 4(%esp), %eax
+ movl $1000, %edx
+ mul %edx /* Microseconds to nanoseconds. */
+ movl (%edi), %ecx
+ movl 4(%edi), %edx
+ subl (%esp), %ecx
+ subl %eax, %edx
+ jns 5f
+ addl $1000000000, %edx
+ subl $1, %ecx
+5: testl %ecx, %ecx
+ js 6f /* Time is already up. */
+
+ movl %ecx, (%esp) /* Store relative timeout. */
+ movl %edx, 4(%esp)
+
+ movl (%ebp), %edx
+ testl %edx, %edx
+ jz 4f
+
+ movl %esp, %esi
+ xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
+ movl %ebp, %ebx
+ movl $SYS_futex, %eax
+ ENTER_KERNEL
+
+ cmpl $0, (%ebx)
+ jne 1f
+4: xorl %eax, %eax
+
+3: addl $8, %esp
+ popl %ebp
+ popl %ebx
+ popl %esi
+ popl %edi
+ ret
+
+1: cmpl $-ETIMEDOUT, %eax
+ jne 2b
+6: movl $ETIMEDOUT, %eax
+ jmp 3b
+ .size __lll_timedwait_tid,.-__lll_timedwait_tid
+#endif
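
[Editorial note: for readers following the register-level code, here is a hedged C
outline of the protocol __lll_mutex_lock_wait and __lll_mutex_unlock_wake implement
(futex value 0 = unlocked, 1 = locked with no waiters, 2 = locked with possible
waiters), plus the gettimeofday-based absolute-to-relative timeout conversion used by
the timed variants.  It assumes Linux futexes and GCC atomics; the helper names are
illustrative, not part of the commit.]

#include <linux/futex.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>

static long futex(int *uaddr, int op, int val, const struct timespec *timeout)
{
	return syscall(SYS_futex, uaddr, op, val, timeout, NULL, 0);
}

/* Slow path entered after an inline cmpxchg(0 -> 1) has failed.  */
static void lll_lock_sketch(int *futexp)
{
	for (;;) {
		/* Mark the lock contended (2) and grab it if it was free;
		   this is the xchgl at label 2 in __lll_mutex_lock_wait.  */
		int old = __sync_lock_test_and_set(futexp, 2);
		if (old == 0)
			return;
		futex(futexp, FUTEX_WAIT, 2, NULL);	/* sleep until woken */
	}
}

/* Slow path of unlock: clear the futex and wake one sleeper, as
   __lll_mutex_unlock_wake does.  */
static void lll_unlock_wake_sketch(int *futexp)
{
	*futexp = 0;
	futex(futexp, FUTEX_WAKE, 1, NULL);
}

/* Absolute CLOCK_REALTIME deadline -> relative timeout for FUTEX_WAIT, the
   way __lll_mutex_timedlock_wait does it: gettimeofday() yields microseconds,
   which are scaled by 1000 to nanoseconds before the subtraction.
   Returns -1 if the deadline has already passed.  */
static int relative_timeout_sketch(const struct timespec *abstime,
				   struct timespec *rel)
{
	struct timeval now;
	gettimeofday(&now, NULL);
	rel->tv_sec  = abstime->tv_sec  - now.tv_sec;
	rel->tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
	if (rel->tv_nsec < 0) {
		rel->tv_nsec += 1000000000;
		rel->tv_sec  -= 1;
	}
	return rel->tv_sec < 0 ? -1 : 0;
}
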
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S
new file mode 100644
index 000000000..2af9e38cd
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S
@@ -0,0 +1,162 @@
+/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevelbarrier.h>
+
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+
+#ifndef UP
+# define LOCK lock
+#else
+# define LOCK
+#endif
+
+
+ .text
+
+ .globl pthread_barrier_wait
+ .type pthread_barrier_wait,@function
+ .align 16
+pthread_barrier_wait:
+ pushl %ebx
+
+ movl 8(%esp), %ebx
+
+ /* Get the mutex. */
+ movl $1, %edx
+ xorl %eax, %eax
+ LOCK
+ cmpxchgl %edx, MUTEX(%ebx)
+ jnz 1f
+
+ /* One less waiter.  If this was the last one needed, wake
+    everybody. */
+2: subl $1, LEFT(%ebx)
+ je 3f
+
+ /* There are more threads to come. */
+ pushl %esi
+
+#if CURR_EVENT == 0
+ movl (%ebx), %edx
+#else
+ movl CURR_EVENT(%ebx), %edx
+#endif
+
+ /* Release the mutex. */
+ LOCK
+ subl $1, MUTEX(%ebx)
+ jne 6f
+
+ /* Wait for the remaining threads. The call will return immediately
+ if the CURR_EVENT memory has meanwhile been changed. */
+7: xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
+ xorl %esi, %esi
+8: movl $SYS_futex, %eax
+ ENTER_KERNEL
+
+ /* Don't return on spurious wakeups. The syscall does not change
+ any register except %eax so there is no need to reload any of
+ them. */
+#if CURR_EVENT == 0
+ cmpl %edx, (%ebx)
+#else
+ cmpl %edx, CURR_EVENT(%ebx)
+#endif
+ je,pn 8b
+
+ /* Increment LEFT. If this brings the count back to the
+ initial count unlock the object. */
+ movl $1, %edx
+ movl INIT_COUNT(%ebx), %ecx
+ LOCK
+ xaddl %edx, LEFT(%ebx)
+ subl $1, %ecx
+ cmpl %ecx, %edx
+ jne,pt 10f
+
+ /* Release the mutex. We cannot release the lock before
+ waking the waiting threads since otherwise a new thread might
+ arrive and get woken up, too. */
+ LOCK
+ subl $1, MUTEX(%ebx)
+ jne 9f
+
+ /* Note: %esi is still zero. */
+10: movl %esi, %eax /* != PTHREAD_BARRIER_SERIAL_THREAD */
+
+ popl %esi
+ popl %ebx
+ ret
+
+ /* The necessary number of threads arrived. */
+3:
+#if CURR_EVENT == 0
+ addl $1, (%ebx)
+#else
+ addl $1, CURR_EVENT(%ebx)
+#endif
+
+ /* Wake up all waiters. The count is a signed number in the kernel
+ so 0x7fffffff is the highest value. */
+ movl $0x7fffffff, %edx
+ movl $FUTEX_WAKE, %ecx
+ movl $SYS_futex, %eax
+ ENTER_KERNEL
+
+ /* Increment LEFT. If this brings the count back to the
+ initial count unlock the object. */
+ movl $1, %edx
+ movl INIT_COUNT(%ebx), %ecx
+ LOCK
+ xaddl %edx, LEFT(%ebx)
+ subl $1, %ecx
+ cmpl %ecx, %edx
+ jne,pt 5f
+
+ /* Release the mutex. We cannot release the lock before
+ waking the waiting threads since otherwise a new thread might
+ arrive and get woken up, too. */
+ LOCK
+ subl $1, MUTEX(%ebx)
+ jne 4f
+
+5: orl $-1, %eax /* == PTHREAD_BARRIER_SERIAL_THREAD */
+
+ popl %ebx
+ ret
+
+1: leal MUTEX(%ebx), %ecx
+ call __lll_mutex_lock_wait
+ jmp 2b
+
+4: leal MUTEX(%ebx), %eax
+ call __lll_mutex_unlock_wake
+ jmp 5b
+
+6: leal MUTEX(%ebx), %eax
+ call __lll_mutex_unlock_wake
+ jmp 7b
+
+9: leal MUTEX(%ebx), %eax
+ call __lll_mutex_unlock_wake
+ jmp 10b
+ .size pthread_barrier_wait,.-pthread_barrier_wait
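
[Editorial note: the barrier above follows a "round counter" scheme: arrivals decrement
LEFT under the internal mutex, the last arrival bumps CURR_EVENT and FUTEX_WAKEs
everyone, and waiters loop on FUTEX_WAIT until CURR_EVENT changes, so spurious wake-ups
are ignored.  A simplified C outline of that scheme follows; it resets LEFT directly
instead of reproducing the assembly's hand-off of the mutex to the last departing
thread, and all names are illustrative.]

#include <limits.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

struct barrier_sketch {
	int lock;	/* MUTEX       */
	int left;	/* LEFT        */
	int curr_event;	/* CURR_EVENT  */
	int init_count;	/* INIT_COUNT  */
};

static long bfutex(int *uaddr, int op, int val)
{
	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

/* Trivial stand-ins for the futex-based low-level lock; the assembly
   calls __lll_mutex_lock_wait / __lll_mutex_unlock_wake instead.  */
static void b_lock(int *l)   { while (__sync_lock_test_and_set(l, 1)) ; }
static void b_unlock(int *l) { __sync_lock_release(l); }

int barrier_wait_sketch(struct barrier_sketch *b)
{
	b_lock(&b->lock);
	if (--b->left == 0) {
		/* Last arrival: open the next round and wake all sleepers.  */
		__sync_fetch_and_add(&b->curr_event, 1);
		b->left = b->init_count;
		b_unlock(&b->lock);
		bfutex(&b->curr_event, FUTEX_WAKE, INT_MAX);
		return -1;	/* PTHREAD_BARRIER_SERIAL_THREAD */
	}
	int event = b->curr_event;
	b_unlock(&b->lock);
	/* Re-check after every wake-up, like the retry loop at label 8.  */
	while (__sync_fetch_and_add(&b->curr_event, 0) == event)
		bfutex(&b->curr_event, FUTEX_WAIT, event);
	return 0;
}
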
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S
new file mode 100644
index 000000000..6e8ffe6f6
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S
@@ -0,0 +1,161 @@
+/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevelcond.h>
+#include <bits/kernel-features.h>
+
+#ifdef UP
+# define LOCK
+#else
+# define LOCK lock
+#endif
+
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+#define FUTEX_REQUEUE 3
+#define FUTEX_CMP_REQUEUE 4
+
+#define EINVAL 22
+
+
+ .text
+
+ /* int pthread_cond_broadcast (pthread_cond_t *cond) */
+ .globl __pthread_cond_broadcast
+ .type __pthread_cond_broadcast, @function
+ .align 16
+__pthread_cond_broadcast:
+
+ pushl %ebx
+ pushl %esi
+ pushl %edi
+ pushl %ebp
+
+ movl 20(%esp), %ebx
+
+ /* Get internal lock. */
+ movl $1, %edx
+ xorl %eax, %eax
+ LOCK
+#if cond_lock == 0
+ cmpxchgl %edx, (%ebx)
+#else
+ cmpxchgl %edx, cond_lock(%ebx)
+#endif
+ jnz 1f
+
+2: addl $cond_futex, %ebx
+ movl total_seq+4-cond_futex(%ebx), %eax
+ movl total_seq-cond_futex(%ebx), %ebp
+ cmpl wakeup_seq+4-cond_futex(%ebx), %eax
+ ja 3f
+ jb 4f
+ cmpl wakeup_seq-cond_futex(%ebx), %ebp
+ jna 4f
+
+ /* Cause all currently waiting threads to recognize they are
+ woken up. */
+3: movl %ebp, wakeup_seq-cond_futex(%ebx)
+ movl %eax, wakeup_seq-cond_futex+4(%ebx)
+ movl %ebp, woken_seq-cond_futex(%ebx)
+ movl %eax, woken_seq-cond_futex+4(%ebx)
+ addl %ebp, %ebp
+ addl $1, broadcast_seq-cond_futex(%ebx)
+ movl %ebp, (%ebx)
+
+ /* Get the address of the mutex used. */
+ movl dep_mutex-cond_futex(%ebx), %edi
+
+ /* Unlock. */
+ LOCK
+ subl $1, cond_lock-cond_futex(%ebx)
+ jne 7f
+
+ /* Don't use requeue for pshared condvars. */
+8: cmpl $-1, %edi
+ je 9f
+
+ /* Wake up all threads. */
+ movl $FUTEX_CMP_REQUEUE, %ecx
+ movl $SYS_futex, %eax
+ movl $0x7fffffff, %esi
+ movl $1, %edx
+ /* Get the address of the futex involved. */
+# if MUTEX_FUTEX != 0
+ addl $MUTEX_FUTEX, %edi
+# endif
+/* FIXME: Until Ingo fixes 4G/4G vDSO, 6 arg syscalls are broken for sysenter.
+ ENTER_KERNEL */
+ int $0x80
+
+ /* For any kind of error, which mainly is EAGAIN, we try again
+ with WAKE. The general test also covers running on old
+ kernels. */
+ cmpl $0xfffff001, %eax
+ jae 9f
+
+10: xorl %eax, %eax
+ popl %ebp
+ popl %edi
+ popl %esi
+ popl %ebx
+ ret
+
+ .align 16
+ /* Unlock. */
+4: LOCK
+ subl $1, cond_lock-cond_futex(%ebx)
+ jne 5f
+
+6: xorl %eax, %eax
+ popl %ebp
+ popl %edi
+ popl %esi
+ popl %ebx
+ ret
+
+ /* Initial locking failed. */
+1:
+#if cond_lock == 0
+ movl %ebx, %ecx
+#else
+ leal cond_lock(%ebx), %ecx
+#endif
+ call __lll_mutex_lock_wait
+ jmp 2b
+
+ /* Unlock in loop requires wakeup. */
+5: leal cond_lock-cond_futex(%ebx), %eax
+ call __lll_mutex_unlock_wake
+ jmp 6b
+
+ /* Unlock in loop requires wakeup. */
+7: leal cond_lock-cond_futex(%ebx), %eax
+ call __lll_mutex_unlock_wake
+ jmp 8b
+
+9: /* The futex requeue functionality is not available. */
+ movl $0x7fffffff, %edx
+ movl $FUTEX_WAKE, %ecx
+ movl $SYS_futex, %eax
+ ENTER_KERNEL
+ jmp 10b
+ .size __pthread_cond_broadcast, .-__pthread_cond_broadcast
+weak_alias(__pthread_cond_broadcast, pthread_cond_broadcast)
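
[Editorial note: the interesting part of the broadcast path is the wake strategy: for
process-private condvars it wakes a single waiter and requeues the rest onto the mutex
futex with FUTEX_CMP_REQUEUE (avoiding a thundering herd), and it falls back to waking
everybody with FUTEX_WAKE when the requeue call fails, e.g. on old kernels, or when the
condvar is process-shared (dep_mutex == -1).  A hedged C sketch of just that decision
follows; the wrapper and function names are illustrative.]

#include <limits.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static long futex6(int *uaddr, int op, int val, unsigned long val2,
		   int *uaddr2, int val3)
{
	return syscall(SYS_futex, uaddr, op, val, val2, uaddr2, val3);
}

static void broadcast_wake_sketch(int *cond_futex, int cond_val,
				  int *mutex_futex, int pshared)
{
	if (!pshared &&
	    futex6(cond_futex, FUTEX_CMP_REQUEUE, 1, INT_MAX,
		   mutex_futex, cond_val) >= 0)
		return;	/* one waiter woken, the rest moved to the mutex */

	/* Fallback, the path at label 9 above: wake every waiter.  */
	futex6(cond_futex, FUTEX_WAKE, INT_MAX, 0, NULL, 0);
}
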
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S
new file mode 100644
index 000000000..11650c8e1
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S
@@ -0,0 +1,110 @@
+/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevelcond.h>
+#include <bits/kernel-features.h>
+
+#ifdef UP
+# define LOCK
+#else
+# define LOCK lock
+#endif
+
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+#define FUTEX_REQUEUE 3
+
+#define EINVAL 22
+
+
+ .text
+
+ /* int pthread_cond_signal (pthread_cond_t *cond) */
+ .globl __pthread_cond_signal
+ .type __pthread_cond_signal, @function
+ .align 16
+__pthread_cond_signal:
+
+ pushl %ebx
+ pushl %edi
+
+ movl 12(%esp), %edi
+
+ /* Get internal lock. */
+ movl $1, %edx
+ xorl %eax, %eax
+ LOCK
+#if cond_lock == 0
+ cmpxchgl %edx, (%edi)
+#else
+ cmpxchgl %edx, cond_lock(%edi)
+#endif
+ jnz 1f
+
+2: leal cond_futex(%edi), %ebx
+ movl total_seq+4(%edi), %eax
+ movl total_seq(%edi), %ecx
+ cmpl wakeup_seq+4(%edi), %eax
+#if cond_lock != 0
+ /* Must use leal to preserve the flags. */
+ leal cond_lock(%edi), %edi
+#endif
+ ja 3f
+ jb 4f
+ cmpl wakeup_seq-cond_futex(%ebx), %ecx
+ jbe 4f
+
+ /* Bump the wakeup number. */
+3: addl $1, wakeup_seq-cond_futex(%ebx)
+ adcl $0, wakeup_seq-cond_futex+4(%ebx)
+ addl $1, (%ebx)
+
+ /* Wake up one thread. */
+ movl $FUTEX_WAKE, %ecx
+ movl $SYS_futex, %eax
+ movl $1, %edx
+ ENTER_KERNEL
+
+ /* Unlock. Note that at this point %edi always points to
+ cond_lock. */
+4: LOCK
+ subl $1, (%edi)
+ jne 5f
+
+6: xorl %eax, %eax
+ popl %edi
+ popl %ebx
+ ret
+
+ /* Initial locking failed. */
+1:
+#if cond_lock == 0
+ movl %edi, %ecx
+#else
+ leal cond_lock(%edi), %ecx
+#endif
+ call __lll_mutex_lock_wait
+ jmp 2b
+
+ /* Unlock in loop requires wakeup. */
+5: movl %edi, %eax
+ call __lll_mutex_unlock_wake
+ jmp 6b
+ .size __pthread_cond_signal, .-__pthread_cond_signal
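
[Editorial note: at the C level, the signal path amounts to: take the condvar's
internal lock, and if some waiter has not yet been granted a wakeup
(total_seq > wakeup_seq), advance wakeup_seq and the futex word and wake one sleeper.
A hedged outline under that reading follows; the struct mirrors the fields the assembly
touches, but the names and the spinlock stand-in are illustrative.]

#include <stdint.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

struct cond_sketch {
	int lock;		/* cond_lock: internal low-level lock  */
	int futex_word;		/* cond_futex: what waiters sleep on   */
	uint64_t total_seq;	/* waiters that ever started waiting   */
	uint64_t wakeup_seq;	/* wakeups granted so far              */
};

/* Simplified stand-ins for the futex-based low-level lock.  */
static void c_lock(int *l)   { while (__sync_lock_test_and_set(l, 1)) ; }
static void c_unlock(int *l) { __sync_lock_release(l); }

int cond_signal_sketch(struct cond_sketch *c)
{
	c_lock(&c->lock);
	if (c->total_seq > c->wakeup_seq) {
		c->wakeup_seq++;	/* the addl/adcl pair in the assembly */
		c->futex_word++;
		syscall(SYS_futex, &c->futex_word, FUTEX_WAKE, 1, NULL, NULL, 0);
	}
	c_unlock(&c->lock);
	return 0;
}
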
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S
new file mode 100644
index 000000000..fc4f21b58
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S
@@ -0,0 +1,466 @@
+/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <lowlevelcond.h>
+#include <pthread-errnos.h>
+
+#ifdef UP
+# define LOCK
+#else
+# define LOCK lock
+#endif
+
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+
+
+ .text
+
+/* int pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
+ const struct timespec *abstime) */
+ .globl __pthread_cond_timedwait
+ .type __pthread_cond_timedwait, @function
+ .align 16
+__pthread_cond_timedwait:
+.LSTARTCODE:
+ pushl %ebp
+.Lpush_ebp:
+ pushl %edi
+.Lpush_edi:
+ pushl %esi
+.Lpush_esi:
+ pushl %ebx
+.Lpush_ebx:
+
+ movl 20(%esp), %ebx
+ movl 28(%esp), %ebp
+
+ cmpl $1000000000, 4(%ebp)
+ movl $EINVAL, %eax
+ jae 18f
+
+ /* Get internal lock. */
+ movl $1, %edx
+ xorl %eax, %eax
+ LOCK
+#if cond_lock == 0
+ cmpxchgl %edx, (%ebx)
+#else
+ cmpxchgl %edx, cond_lock(%ebx)
+#endif
+ jnz 1f
+
+ /* Store the reference to the mutex.  If there is already a
+    different value in there, this is a bad user bug. */
+2: cmpl $-1, dep_mutex(%ebx)
+ movl 24(%esp), %eax
+ je 17f
+ movl %eax, dep_mutex(%ebx)
+
+ /* Unlock the mutex. */
+17: xorl %edx, %edx
+ call __pthread_mutex_unlock_usercnt
+
+ testl %eax, %eax
+ jne 16f
+
+ addl $1, total_seq(%ebx)
+ adcl $0, total_seq+4(%ebx)
+ addl $1, cond_futex(%ebx)
+ addl $(1 << clock_bits), cond_nwaiters(%ebx)
+
+#define FRAME_SIZE 24
+ subl $FRAME_SIZE, %esp
+.Lsubl:
+
+ /* Get and store current wakeup_seq value. */
+ movl wakeup_seq(%ebx), %edi
+ movl wakeup_seq+4(%ebx), %edx
+ movl broadcast_seq(%ebx), %eax
+ movl %edi, 12(%esp)
+ movl %edx, 16(%esp)
+ movl %eax, 20(%esp)
+
+ /* Get the current time. */
+8: movl %ebx, %edx
+#ifdef __NR_clock_gettime
+ /* Get the clock number. */
+ movl cond_nwaiters(%ebx), %ebx
+ andl $((1 << clock_bits) - 1), %ebx
+ /* Only clocks 0 and 1 are allowed so far. Both are handled in the
+ kernel. */
+ leal 4(%esp), %ecx
+ movl $__NR_clock_gettime, %eax
+ ENTER_KERNEL
+# ifndef __ASSUME_POSIX_TIMERS
+ cmpl $-ENOSYS, %eax
+ je 19f
+# endif
+ movl %edx, %ebx
+
+ /* Compute relative timeout. */
+ movl (%ebp), %ecx
+ movl 4(%ebp), %edx
+ subl 4(%esp), %ecx
+ subl 8(%esp), %edx
+#else
+ /* Get the current time. */
+ leal 4(%esp), %ebx
+ xorl %ecx, %ecx
+ movl $SYS_gettimeofday, %eax
+ ENTER_KERNEL
+ movl %edx, %ebx
+
+ /* Compute relative timeout. */
+ movl 8(%esp), %eax
+ movl $1000, %edx
+ mul %edx /* Microseconds to nanoseconds. */
+ movl (%ebp), %ecx
+ movl 4(%ebp), %edx
+ subl 4(%esp), %ecx
+ subl %eax, %edx
+#endif
+ jns 12f
+ addl $1000000000, %edx
+ subl $1, %ecx
+12: testl %ecx, %ecx
+ movl $-ETIMEDOUT, %esi
+ js 6f
+
+ /* Store relative timeout. */
+21: movl %ecx, 4(%esp)
+ movl %edx, 8(%esp)
+
+ movl cond_futex(%ebx), %edi
+
+ /* Unlock. */
+ LOCK
+#if cond_lock == 0
+ subl $1, (%ebx)
+#else
+ subl $1, cond_lock(%ebx)
+#endif
+ jne 3f
+
+.LcleanupSTART:
+4: call __pthread_enable_asynccancel
+ movl %eax, (%esp)
+
+ leal 4(%esp), %esi
+ xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
+ movl %edi, %edx
+ addl $cond_futex, %ebx
+.Ladd_cond_futex:
+ movl $SYS_futex, %eax
+ ENTER_KERNEL
+ subl $cond_futex, %ebx
+.Lsub_cond_futex:
+ movl %eax, %esi
+
+ movl (%esp), %eax
+ call __pthread_disable_asynccancel
+.LcleanupEND:
+
+ /* Lock. */
+ movl $1, %edx
+ xorl %eax, %eax
+ LOCK
+#if cond_lock == 0
+ cmpxchgl %edx, (%ebx)
+#else
+ cmpxchgl %edx, cond_lock(%ebx)
+#endif
+ jnz 5f
+
+6: movl broadcast_seq(%ebx), %eax
+ cmpl 20(%esp), %eax
+ jne 23f
+
+ movl woken_seq(%ebx), %eax
+ movl woken_seq+4(%ebx), %ecx
+
+ movl wakeup_seq(%ebx), %edi
+ movl wakeup_seq+4(%ebx), %edx
+
+ cmpl 16(%esp), %edx
+ jne 7f
+ cmpl 12(%esp), %edi
+ je 15f
+
+7: cmpl %ecx, %edx
+ jne 9f
+ cmp %eax, %edi
+ jne 9f
+
+15: cmpl $-ETIMEDOUT, %esi
+ jne 8b
+
+ addl $1, wakeup_seq(%ebx)
+ adcl $0, wakeup_seq+4(%ebx)
+ addl $1, cond_futex(%ebx)
+ movl $ETIMEDOUT, %esi
+ jmp 14f
+
+23: xorl %esi, %esi
+ jmp 24f
+
+9: xorl %esi, %esi
+14: addl $1, woken_seq(%ebx)
+ adcl $0, woken_seq+4(%ebx)
+
+24: subl $(1 << clock_bits), cond_nwaiters(%ebx)
+
+ /* Wake up a thread which wants to destroy the condvar object. */
+ movl total_seq(%ebx), %eax
+ andl total_seq+4(%ebx), %eax
+ cmpl $0xffffffff, %eax
+ jne 25f
+ movl cond_nwaiters(%ebx), %eax
+ andl $~((1 << clock_bits) - 1), %eax
+ jne 25f
+
+ addl $cond_nwaiters, %ebx
+ movl $SYS_futex, %eax
+ movl $FUTEX_WAKE, %ecx
+ movl $1, %edx
+ ENTER_KERNEL
+ subl $cond_nwaiters, %ebx
+
+25: LOCK
+#if cond_lock == 0
+ subl $1, (%ebx)
+#else
+ subl $1, cond_lock(%ebx)
+#endif
+ jne 10f
+
+ /* Remove cancellation handler. */
+11: movl 24+FRAME_SIZE(%esp), %eax
+ call __pthread_mutex_cond_lock
+ addl $FRAME_SIZE, %esp
+.Laddl:
+
+ /* We return the result of the mutex_lock operation if it failed. */
+ testl %eax, %eax
+#ifdef HAVE_CMOV
+ cmovel %esi, %eax
+#else
+ jne 22f
+ movl %esi, %eax
+22:
+#endif
+
+18: popl %ebx
+.Lpop_ebx:
+ popl %esi
+.Lpop_esi:
+ popl %edi
+.Lpop_edi:
+ popl %ebp
+.Lpop_ebp:
+
+ ret
+
+ /* Initial locking failed. */
+1:
+.LSbl1:
+#if cond_lock == 0
+ movl %ebx, %ecx
+#else
+ leal cond_lock(%ebx), %ecx
+#endif
+ call __lll_mutex_lock_wait
+ jmp 2b
+
+ /* Unlock in loop requires wakeup. */
+3:
+.LSbl2:
+#if cond_lock == 0
+ movl %ebx, %eax
+#else
+ leal cond_lock(%ebx), %eax
+#endif
+ call __lll_mutex_unlock_wake
+ jmp 4b
+
+ /* Locking in loop failed. */