Diffstat (limited to 'libpthread/linuxthreads/sysdeps/x86_64')
-rw-r--r--   libpthread/linuxthreads/sysdeps/x86_64/pspinlock.c       96
-rw-r--r--   libpthread/linuxthreads/sysdeps/x86_64/pt-machine.h     156
-rw-r--r--   libpthread/linuxthreads/sysdeps/x86_64/tcb-offsets.sym    4
3 files changed, 246 insertions(+), 10 deletions(-)
diff --git a/libpthread/linuxthreads/sysdeps/x86_64/pspinlock.c b/libpthread/linuxthreads/sysdeps/x86_64/pspinlock.c
new file mode 100644
index 000000000..80c1b05a8
--- /dev/null
+++ b/libpthread/linuxthreads/sysdeps/x86_64/pspinlock.c
@@ -0,0 +1,96 @@
+/* POSIX spinlock implementation. x86-64 version.
+ Copyright (C) 2001 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <errno.h>
+#include <pthread.h>
+#include "internals.h"
+
+/* This implementation is similar to the one used in the Linux kernel.
+ But the kernel uses byte instructions for the memory access. That is
+ faster but unusable here. The problem is that only 128
+ threads/processes could use the spinlock at the same time. If (by
+ a design error in the program) a thread/process held the spinlock
+ long enough for 128 waiting processes to accumulate, the next one
+ would find a positive value in the spinlock and assume it is
+ unlocked. We cannot accept that. */
+
+int
+__pthread_spin_lock (pthread_spinlock_t *lock)
+{
+ __asm__ __volatile__
+ ("\n"
+ "1:\n\t"
+ "lock; decl %0\n\t"
+ "js 2f\n\t"
+ ".section .text.spinlock,\"ax\"\n"
+ "2:\n\t"
+ "cmpl $0,%0\n\t"
+ "rep; nop\n\t"
+ "jle 2b\n\t"
+ "jmp 1b\n\t"
+ ".previous"
+ : "=m" (*lock));
+ return 0;
+}
+weak_alias (__pthread_spin_lock, pthread_spin_lock)
+
+
+int
+__pthread_spin_trylock (pthread_spinlock_t *lock)
+{
+ int oldval;
+
+ __asm__ __volatile__
+ ("xchgl %0,%1"
+ : "=r" (oldval), "=m" (*lock)
+ : "0" (0));
+ return oldval > 0 ? 0 : EBUSY;
+}
+weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
+
+
+int
+__pthread_spin_unlock (pthread_spinlock_t *lock)
+{
+ __asm__ __volatile__
+ ("movl $1,%0"
+ : "=m" (*lock));
+ return 0;
+}
+weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
+
+
+int
+__pthread_spin_init (pthread_spinlock_t *lock, int pshared)
+{
+ /* We can ignore the `pshared' parameter. Since we are busy-waiting,
+ all processes which can access the memory location `lock' points
+ to can use the spinlock. */
+ *lock = 1;
+ return 0;
+}
+weak_alias (__pthread_spin_init, pthread_spin_init)
+
+
+int
+__pthread_spin_destroy (pthread_spinlock_t *lock)
+{
+ /* Nothing to do. */
+ return 0;
+}
+weak_alias (__pthread_spin_destroy, pthread_spin_destroy)
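
For context, a minimal usage sketch of the POSIX spin lock interface implemented above (example code, not part of the commit): two threads increment a shared counter under the lock. It assumes a libc exposing pthread_spin_* and is built with -lpthread.

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t lock;
static long counter;

static void *
worker (void *arg)
{
  int i;
  for (i = 0; i < 100000; ++i)
    {
      pthread_spin_lock (&lock);	/* spins on `lock; decl' until positive */
      ++counter;
      pthread_spin_unlock (&lock);	/* `movl $1' releases the lock */
    }
  return NULL;
}

int
main (void)
{
  pthread_t t1, t2;

  pthread_spin_init (&lock, PTHREAD_PROCESS_PRIVATE);
  pthread_create (&t1, NULL, worker, NULL);
  pthread_create (&t2, NULL, worker, NULL);
  pthread_join (t1, NULL);
  pthread_join (t2, NULL);
  pthread_spin_destroy (&lock);
  printf ("counter = %ld\n", counter);	/* expect 200000 */
  return 0;
}
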
diff --git a/libpthread/linuxthreads/sysdeps/x86_64/pt-machine.h b/libpthread/linuxthreads/sysdeps/x86_64/pt-machine.h
index ed1fa30b1..0e0851e1e 100644
--- a/libpthread/linuxthreads/sysdeps/x86_64/pt-machine.h
+++ b/libpthread/linuxthreads/sysdeps/x86_64/pt-machine.h
@@ -20,7 +20,7 @@
#ifndef _PT_MACHINE_H
#define _PT_MACHINE_H 1
-#include <features.h>
+# include <features.h>
#ifndef __ASSEMBLER__
# include <stddef.h> /* For offsetof. */
@@ -32,9 +32,6 @@
# define PT_EI __extern_always_inline
# endif
-extern long int testandset (int *);
-extern int __compare_and_swap (long int *, long int, long int);
-
/* Get some notion of the current stack. Need not be exactly the top
of the stack, just something somewhere in the current frame. */
# define CURRENT_STACK_FRAME stack_pointer
@@ -43,14 +40,14 @@ register char * stack_pointer __asm__ ("%rsp") __attribute_used__;
/* Spinlock implementation; required. */
PT_EI long int
-testandset (int *__spinlock)
+testandset (int *spinlock)
{
long int ret;
__asm__ __volatile__ (
"xchgl %k0, %1"
- : "=r"(ret), "=m"(*__spinlock)
- : "0"(1), "m"(*__spinlock)
+ : "=r"(ret), "=m"(*spinlock)
+ : "0"(1), "m"(*spinlock)
: "memory");
return ret;
@@ -61,18 +58,157 @@ testandset (int *__spinlock)
# define HAS_COMPARE_AND_SWAP
PT_EI int
-__compare_and_swap (long int *__p, long int __oldval, long int __newval)
+__compare_and_swap (long int *p, long int oldval, long int newval)
{
char ret;
long int readval;
__asm__ __volatile__ ("lock; cmpxchgq %3, %1; sete %0"
- : "=q" (ret), "=m" (*__p), "=a" (readval)
- : "r" (__newval), "m" (*__p), "a" (__oldval)
+ : "=q" (ret), "=m" (*p), "=a" (readval)
+ : "r" (newval), "m" (*p), "a" (oldval)
: "memory");
return ret;
}
+/* Return the thread descriptor for the current thread.
+
+ The contained asm must *not* be marked volatile since otherwise
+ assignments like
+ pthread_descr self = thread_self();
+ do not get optimized away. */
+# define THREAD_SELF \
+({ \
+ register pthread_descr __self; \
+ __asm__ ("movq %%fs:%c1,%0" : "=r" (__self) \
+ : "i" (offsetof (struct _pthread_descr_struct, \
+ p_header.data.self))); \
+ __self; \
+})
+
+/* Prototype for the system call. */
+extern int arch_prctl (int __code, unsigned long __addr);
+
+/* Initialize the thread-unique value. */
+# define INIT_THREAD_SELF(descr, nr) \
+{ \
+ if (arch_prctl (ARCH_SET_FS, (unsigned long)descr) != 0) \
+ abort (); \
+}
+
+/* Read member of the thread descriptor directly. */
+# define THREAD_GETMEM(descr, member) \
+({ \
+ __typeof__ (descr->member) __value; \
+ if (sizeof (__value) == 1) \
+ __asm__ __volatile__ ("movb %%fs:%P2,%b0" \
+ : "=q" (__value) \
+ : "0" (0), \
+ "i" (offsetof (struct _pthread_descr_struct, \
+ member))); \
+ else if (sizeof (__value) == 4) \
+ __asm__ __volatile__ ("movl %%fs:%P2,%k0" \
+ : "=r" (__value) \
+ : "0" (0), \
+ "i" (offsetof (struct _pthread_descr_struct, \
+ member))); \
+ else \
+ { \
+ if (sizeof (__value) != 8) \
+ /* There should not be any value with a size other than 1, 4 or 8. */\
+ abort (); \
+ \
+ __asm__ __volatile__ ("movq %%fs:%P1,%0" \
+ : "=r" (__value) \
+ : "i" (offsetof (struct _pthread_descr_struct, \
+ member))); \
+ } \
+ __value; \
+})
+
+/* Same as THREAD_GETMEM, but the member offset can be non-constant. */
+# define THREAD_GETMEM_NC(descr, member) \
+({ \
+ __typeof__ (descr->member) __value; \
+ if (sizeof (__value) == 1) \
+ __asm__ __volatile__ ("movb %%fs:(%2),%b0" \
+ : "=q" (__value) \
+ : "0" (0), \
+ "r" (offsetof (struct _pthread_descr_struct, \
+ member))); \
+ else if (sizeof (__value) == 4) \
+ __asm__ __volatile__ ("movl %%fs:(%2),%k0" \
+ : "=r" (__value) \
+ : "0" (0), \
+ "r" (offsetof (struct _pthread_descr_struct, \
+ member))); \
+ else \
+ { \
+ if (sizeof (__value) != 8) \
+ /* There should not be any value with a size other than 1, 4 or 8. */\
+ abort (); \
+ \
+ __asm__ __volatile__ ("movq %%fs:(%1),%0" \
+ : "=r" (__value) \
+ : "r" (offsetof (struct _pthread_descr_struct, \
+ member))); \
+ } \
+ __value; \
+})
+
+/* Set member of the thread descriptor directly. */
+# define THREAD_SETMEM(descr, member, value) \
+({ \
+ __typeof__ (descr->member) __value = (value); \
+ if (sizeof (__value) == 1) \
+ __asm__ __volatile__ ("movb %0,%%fs:%P1" : \
+ : "q" (__value), \
+ "i" (offsetof (struct _pthread_descr_struct, \
+ member))); \
+ else if (sizeof (__value) == 4) \
+ __asm__ __volatile__ ("movl %k0,%%fs:%P1" : \
+ : "r" (__value), \
+ "i" (offsetof (struct _pthread_descr_struct, \
+ member))); \
+ else \
+ { \
+ if (sizeof (__value) != 8) \
+ /* There should not be any value with a size other than 1, 4 or 8. */\
+ abort (); \
+ \
+ __asm__ __volatile__ ("movq %0,%%fs:%P1" : \
+ : "r" (__value), \
+ "i" (offsetof (struct _pthread_descr_struct, \
+ member))); \
+ } \
+})
+
+/* Same as THREAD_SETMEM, but the member offset can be non-constant. */
+# define THREAD_SETMEM_NC(descr, member, value) \
+({ \
+ __typeof__ (descr->member) __value = (value); \
+ if (sizeof (__value) == 1) \
+ __asm__ __volatile__ ("movb %0,%%fs:(%1)" : \
+ : "q" (__value), \
+ "r" (offsetof (struct _pthread_descr_struct, \
+ member))); \
+ else if (sizeof (__value) == 4) \
+ __asm__ __volatile__ ("movl %k0,%%fs:(%1)" : \
+ : "r" (__value), \
+ "r" (offsetof (struct _pthread_descr_struct, \
+ member))); \
+ else \
+ { \
+ if (sizeof (__value) != 8) \
+ /* There should not be any value with a size other than 1, 4 or 8. */\
+ abort (); \
+ \
+ __asm__ __volatile__ ("movq %0,%%fs:(%1)" : \
+ : "r" (__value), \
+ "r" (offsetof (struct _pthread_descr_struct, \
+ member))); \
+ } \
+})
+
#endif /* !__ASSEMBLER__ */
/* We want the OS to assign stack addresses. */
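
As a side note (an illustration, not part of the commit): callers of a compare-and-swap primitive like __compare_and_swap above typically wrap it in a retry loop. The self-contained sketch below shows that pattern, with GCC's __sync_bool_compare_and_swap builtin standing in for the header's inline function so it compiles without the linuxthreads internals.

#include <stdio.h>

static long int counter;

/* Add N to *P atomically: re-read and retry whenever another thread
   changed *P between the snapshot and the compare-and-swap. */
static void
atomic_add (long int *p, long int n)
{
  long int oldval;
  do
    oldval = *p;
  while (!__sync_bool_compare_and_swap (p, oldval, oldval + n));
}

int
main (void)
{
  atomic_add (&counter, 5);
  printf ("counter = %ld\n", counter);
  return 0;
}
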
diff --git a/libpthread/linuxthreads/sysdeps/x86_64/tcb-offsets.sym b/libpthread/linuxthreads/sysdeps/x86_64/tcb-offsets.sym
new file mode 100644
index 000000000..aee6be257
--- /dev/null
+++ b/libpthread/linuxthreads/sysdeps/x86_64/tcb-offsets.sym
@@ -0,0 +1,4 @@
+#include <sysdep.h>
+#include <tls.h>
+
+MULTIPLE_THREADS_OFFSET offsetof (tcbhead_t, multiple_threads)
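
For orientation (an illustration, not part of the commit): a .sym file such as tcb-offsets.sym is fed to the build system's gen-as-const style machinery, which evaluates each expression with the C compiler and writes the result as a plain constant into a generated header (here tcb-offsets.h), so hand-written assembly can refer to %fs:MULTIPLE_THREADS_OFFSET. The stand-alone program below mimics that evaluation for a mock descriptor layout; the real tcbhead_t comes from <tls.h>.

#include <stdio.h>
#include <stddef.h>

/* Mock layout for illustration only; the real structure is defined in tls.h. */
typedef struct
{
  void *tcb;
  void *dtv;
  void *self;
  int multiple_threads;
} mock_tcbhead_t;

int
main (void)
{
  /* This is what the generated tcb-offsets.h would define as
     MULTIPLE_THREADS_OFFSET for this mock layout. */
  printf ("MULTIPLE_THREADS_OFFSET = %zu\n",
          offsetof (mock_tcbhead_t, multiple_threads));
  return 0;
}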