author     Khem Raj <raj.khem@gmail.com>    2010-05-05 22:50:19 -0700
committer  Khem Raj <raj.khem@gmail.com>    2010-05-09 22:53:25 -0700
commit     e3dec33ba03ef7080a39444388a01e59135bb3aa (patch)
tree       9693449a78a556a91ddba349116d9c988399071a
parent     d2ab6ee38dd6ef107bac77f485ab1bb35b177e6c (diff)
powerpc: Add TLS and NPTL support
Signed-off-by: Khem Raj <raj.khem@gmail.com>
Signed-off-by: Bernhard Reutner-Fischer <rep.dot.nop@gmail.com>
42 files changed, 957 insertions, 396 deletions
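
For context before the diff: the relocations this patch teaches the PowerPC dynamic linker to handle (R_PPC_DTPMOD32, R_PPC_DTPREL32, R_PPC_TPREL32) are what the toolchain emits for __thread variables. A minimal sketch of user code that would exercise them once TLS/NPTL support is in place; the file and symbol names are made up for illustration:

    /* tls_example.c - hypothetical test program, not part of this patch */
    #include <pthread.h>
    #include <stdio.h>

    __thread int counter;   /* in a shared object this typically needs
                               R_PPC_DTPMOD32 + R_PPC_DTPREL32; in the main
                               executable an R_PPC_TPREL32 access suffices */

    static void *worker(void *arg)
    {
        counter++;          /* each thread sees its own copy */
        printf("counter = %d\n", counter);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, worker, NULL);
        pthread_join(t, NULL);
        return 0;
    }
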
diff --git a/Makefile.in b/Makefile.in
index 5474d37bc..126670970 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -214,8 +214,6 @@ HEADERS_RM- := \
 	bits/utmpx.h \
 	bits/uClibc_errno.h \
 	bits/uClibc_uintmaxtostr.h \
-	atomic.h \
-	bits/atomic.h \
 	bits/sigcontextinfo.h \
 	bits/stackinfo.h \
 	tls.h \
@@ -257,6 +255,9 @@ HEADERS_RM-$(UCLIBC_LINUX_SPECIFIC) += \
 	sys/sysctl.h \
 	sys/sysinfo.h \
 	sys/vfs.h
+HEADERS_RM-$(UCLIBC_HAS_THREADS_NATIVE) += \
+	atomic.h \
+	bits/atomic.h
 HEADERS_RM-$(HAVE_SHARED) += dlfcn.h bits/dlfcn.h
 HEADERS_RM-$(PTHREADS_DEBUG_SUPPORT) += thread_db.h
 HEADERS_RM-$(UCLIBC_HAS_BSD_ERR) += err.h
diff --git a/ldso/include/ldso.h b/ldso/include/ldso.h
index 4675dd7f4..69b5dd75a 100644
--- a/ldso/include/ldso.h
+++ b/ldso/include/ldso.h
@@ -78,6 +78,10 @@
 extern void _dl_add_to_slotinfo (struct link_map *l);
 extern void ** __attribute__ ((const)) _dl_initial_error_catch_tsd (void);
 #endif
+#ifdef USE_TLS
+void _dl_add_to_slotinfo (struct link_map *l);
+void ** __attribute__ ((const)) _dl_initial_error_catch_tsd (void);
+#endif
 #ifdef __SUPPORT_LD_DEBUG__
 extern char *_dl_debug;
 extern char *_dl_debug_symbols;
diff --git a/ldso/ldso/powerpc/dl-sysdep.h b/ldso/ldso/powerpc/dl-sysdep.h
index f33214cbe..a665d4e75 100644
--- a/ldso/ldso/powerpc/dl-sysdep.h
+++ b/ldso/ldso/powerpc/dl-sysdep.h
@@ -77,6 +77,8 @@
 void _dl_init_got(unsigned long *lpnt,struct elf_resolve *tpnt);
 #define elf_machine_type_class(type) \
 	((((type) == R_PPC_JMP_SLOT \
 	|| (type) == R_PPC_REL24 \
+	|| ((type) >= R_PPC_DTPMOD32 /* contiguous TLS */ \
+	&& (type) <= R_PPC_DTPREL32) \
 	|| (type) == R_PPC_ADDR24) * ELF_RTYPE_CLASS_PLT) \
 	| (((type) == R_PPC_COPY) * ELF_RTYPE_CLASS_COPY))
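
The dl-sysdep.h change relies on the PowerPC TLS relocation numbers forming one contiguous block in elf.h, as the /* contiguous TLS */ comment notes. A standalone sketch of the same classification logic, assuming a host <elf.h> that defines the R_PPC_* constants (the RTYPE_CLASS_* values below are local stand-ins, not taken from this patch):

    /* classify.c - hypothetical illustration of the range test above */
    #include <elf.h>
    #include <stdio.h>

    #define RTYPE_CLASS_PLT  1   /* stand-in for ELF_RTYPE_CLASS_PLT  */
    #define RTYPE_CLASS_COPY 2   /* stand-in for ELF_RTYPE_CLASS_COPY */

    static int type_class(unsigned type)
    {
        return (((type == R_PPC_JMP_SLOT || type == R_PPC_REL24
                  || (type >= R_PPC_DTPMOD32 && type <= R_PPC_DTPREL32)
                  || type == R_PPC_ADDR24) * RTYPE_CLASS_PLT)
                | ((type == R_PPC_COPY) * RTYPE_CLASS_COPY));
    }

    int main(void)
    {
        /* TLS relocations now land in the PLT class, like jump slots. */
        printf("TPREL32 -> %d, COPY -> %d, ADDR32 -> %d\n",
               type_class(R_PPC_TPREL32), type_class(R_PPC_COPY),
               type_class(R_PPC_ADDR32));
        return 0;
    }
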
diff --git a/ldso/ldso/powerpc/elfinterp.c b/ldso/ldso/powerpc/elfinterp.c
index 0dcb175bf..855c040d3 100644
--- a/ldso/ldso/powerpc/elfinterp.c
+++ b/ldso/ldso/powerpc/elfinterp.c
@@ -30,6 +30,8 @@
  */
 
 #include "ldso.h"
+#define TLS_DTV_OFFSET 0x8000
+#define TLS_TP_OFFSET 0x7000
 
 extern int _dl_linux_resolve(void);
 
@@ -157,15 +159,15 @@ unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry)
 		*reloc_addr = OPCODE_BA (finaladdr);
 	} else {
 		/* Warning: we don't handle double-sized PLT entries */
-		Elf32_Word *plt, *data_words, index, offset;
+		Elf32_Word *plt, *data_words, idx, offset;
 
 		plt = (Elf32_Word *)tpnt->dynamic_info[DT_PLTGOT];
 		offset = reloc_addr - plt;
-		index = (offset - PLT_INITIAL_ENTRY_WORDS)/2;
+		idx = (offset - PLT_INITIAL_ENTRY_WORDS)/2;
 		data_words = (Elf32_Word *)tpnt->data_words;
 		reloc_addr += 1;
 
-		data_words[index] = finaladdr;
+		data_words[idx] = finaladdr;
 		PPC_SYNC;
 		*reloc_addr = OPCODE_B ((PLT_LONGBRANCH_ENTRY_WORDS - (offset+1)) * 4);
 	}
@@ -185,28 +187,36 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope,
 {
 	int reloc_type;
 	int symtab_index;
-	char *symname;
+	ElfW(Sym) *sym;
 	Elf32_Addr *reloc_addr;
 	Elf32_Addr finaladdr;
-
+	struct elf_resolve *tls_tpnt = NULL;
 	unsigned long symbol_addr;
+	char *symname;
 #if defined (__SUPPORT_LD_DEBUG__)
 	unsigned long old_val;
 #endif
 
-	reloc_addr = (Elf32_Addr *)(intptr_t) (tpnt->loadaddr + (unsigned long) rpnt->r_offset);
-	reloc_type = ELF32_R_TYPE(rpnt->r_info);
+	symbol_addr = tpnt->loadaddr; /* For R_PPC_RELATIVE */
+	reloc_addr = (Elf32_Addr *)(intptr_t) (symbol_addr + (unsigned long) rpnt->r_offset);
+	reloc_type = ELF32_R_TYPE(rpnt->r_info);
 	symtab_index = ELF32_R_SYM(rpnt->r_info);
-	symname = strtab + symtab[symtab_index].st_name;
+	sym = &symtab[symtab_index];
+	symname = strtab + sym->st_name;
 	if (symtab_index) {
 		symbol_addr = (unsigned long) _dl_find_hash(symname, scope, tpnt,
-				elf_machine_type_class(reloc_type), NULL);
+				elf_machine_type_class(reloc_type), &tls_tpnt);
 		/* We want to allow undefined references to weak symbols - this might
 		 * have been intentional.  We should not be linking local symbols
 		 * here, so all bases should be covered. */
-		if (unlikely(!symbol_addr && ELF32_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK))
+		if (unlikely(!symbol_addr
+			&& (ELF32_ST_TYPE(sym->st_info) != STT_TLS
+			&& ELF32_ST_BIND(sym->st_info) != STB_WEAK)))
 			return 1;
+	} else {
+		symbol_addr = sym->st_value;
+		tls_tpnt = tpnt;
 	}
 
 #if defined (__SUPPORT_LD_DEBUG__)
 	old_val = *reloc_addr;
@@ -232,15 +242,15 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope,
 			*reloc_addr = OPCODE_BA (finaladdr);
 		} else {
 			/* Warning: we don't handle double-sized PLT entries */
-			Elf32_Word *plt, *data_words, index, offset;
+			Elf32_Word *plt, *data_words, idx, offset;
 
 			plt = (Elf32_Word *)tpnt->dynamic_info[DT_PLTGOT];
 			offset = reloc_addr - plt;
-			index = (offset - PLT_INITIAL_ENTRY_WORDS)/2;
+			idx = (offset - PLT_INITIAL_ENTRY_WORDS)/2;
 			data_words = (Elf32_Word *)tpnt->data_words;
 
-			data_words[index] = finaladdr;
-			reloc_addr[0] = OPCODE_LI(11,index*4);
+			data_words[idx] = finaladdr;
+			reloc_addr[0] = OPCODE_LI(11,idx*4);
 			reloc_addr[1] = OPCODE_B((PLT_LONGBRANCH_ENTRY_WORDS - (offset+1)) * 4);
 
 			/* instructions were modified */
@@ -255,10 +265,10 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope,
 #if defined (__SUPPORT_LD_DEBUG__)
 		if (_dl_debug_move)
 			_dl_dprintf(_dl_debug_file,"\n%s move %x bytes from %x to %x",
-				symname, symtab[symtab_index].st_size,
+				symname, sym->st_size,
 				symbol_addr, reloc_addr);
 #endif
-		_dl_memcpy((char *) reloc_addr, (char *) finaladdr, symtab[symtab_index].st_size);
+		_dl_memcpy((char *) reloc_addr, (char *) finaladdr, sym->st_size);
 		goto out_nocode; /* No code code modified */
 	case R_PPC_ADDR16_HA:
 		finaladdr += 0x8000; /* fall through. */
@@ -267,6 +277,19 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope,
 	case R_PPC_ADDR16_LO:
 		*(short *)reloc_addr = finaladdr;
 		break;
+#if USE_TLS
+	case R_PPC_DTPMOD32:
+		*reloc_addr = tls_tpnt->l_tls_modid;
+		break;
+	case R_PPC_DTPREL32:
+		/* During relocation all TLS symbols are defined and used.
+		   Therefore the offset is already correct. */
+		*reloc_addr = finaladdr - TLS_DTV_OFFSET;
+		break;
+	case R_PPC_TPREL32:
+		*reloc_addr = tls_tpnt->l_tls_offset + finaladdr - TLS_TP_OFFSET;
+		break;
+#endif
 	case R_PPC_REL24:
 #if 0
 		{
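
The three new cases in elfinterp.c follow the PowerPC TLS ABI, where DTV-relative offsets are biased by 0x8000 and thread-pointer-relative offsets by 0x7000 (the TLS_DTV_OFFSET/TLS_TP_OFFSET constants added above). A compact restatement of that arithmetic outside the resolver, with a stand-in struct for the elf_resolve fields it reads; it assumes a host <elf.h> that provides the R_PPC_* TLS constants:

    /* tls_reloc_sketch.c - hypothetical restatement of the switch above */
    #include <elf.h>
    #include <stdint.h>

    #define TLS_DTV_OFFSET 0x8000   /* bias on DTV-relative offsets */
    #define TLS_TP_OFFSET  0x7000   /* bias on TP-relative offsets  */

    struct tls_module {                  /* stand-in for struct elf_resolve */
        unsigned long l_tls_modid;       /* index of the module in the DTV  */
        unsigned long l_tls_offset;      /* offset of its static TLS block  */
    };

    static void apply_tls_reloc(unsigned type, uint32_t *reloc_addr,
                                uint32_t finaladdr, const struct tls_module *m)
    {
        switch (type) {
        case R_PPC_DTPMOD32:    /* record which module owns the variable */
            *reloc_addr = m->l_tls_modid;
            break;
        case R_PPC_DTPREL32:    /* offset inside that module's TLS block */
            *reloc_addr = finaladdr - TLS_DTV_OFFSET;
            break;
        case R_PPC_TPREL32:     /* offset from the thread pointer */
            *reloc_addr = m->l_tls_offset + finaladdr - TLS_TP_OFFSET;
            break;
        }
    }
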
diff --git a/libc/sysdeps/linux/common/Makefile.in b/libc/sysdeps/linux/common/Makefile.in
index a6fa6d091..6f833cec4 100644
--- a/libc/sysdeps/linux/common/Makefile.in
+++ b/libc/sysdeps/linux/common/Makefile.in
@@ -98,6 +98,9 @@ ifeq ($(TARGET_ARCH),i386)
 CSRC := $(filter-out vfork.c,$(CSRC))
 endif
 
+# provided via pthreads builddir
+CSRC := $(filter-out $(libc_a_CSRC) $(libc_a_SSRC:.S=.c),$(CSRC))
+
 # fails for some reason
 ifneq ($(strip $(ARCH_OBJS)),)
 CSRC := $(filter-out $(notdir $(ARCH_OBJS:.o=.c)) $(ARCH_OBJ_FILTEROUT),$(CSRC))
diff --git a/libc/sysdeps/linux/powerpc/Makefile.arch b/libc/sysdeps/linux/powerpc/Makefile.arch
index 8c7fc2de6..cdb35ba43 100644
--- a/libc/sysdeps/linux/powerpc/Makefile.arch
+++ b/libc/sysdeps/linux/powerpc/Makefile.arch
@@ -13,7 +13,10 @@ endif
 
 SSRC := \
 	__longjmp.S setjmp.S bsd-setjmp.S bsd-_setjmp.S brk.S \
-	clone.S __uClibc_syscall.S syscall.S vfork.S
+	__uClibc_syscall.S syscall.S
+ifneq ($(UCLIBC_HAS_THREADS_NATIVE),y)
+SSRC += clone.S vfork.S
+endif
 
 ifeq ($(CONFIG_E500),y)
 ARCH_HEADERS := fenv.h
diff --git a/libc/sysdeps/linux/powerpc/bits/atomic.h b/libc/sysdeps/linux/powerpc/bits/atomic.h
index 1088d2f3c..3dbbb3a09 100644
--- a/libc/sysdeps/linux/powerpc/bits/atomic.h
+++ b/libc/sysdeps/linux/powerpc/bits/atomic.h
@@ -335,12 +335,28 @@
 # define __arch_atomic_decrement_if_positive_64(mem) \
     ({ abort (); (*mem)--; })
 
+#ifdef _ARCH_PWR4
+/*
+ * Newer powerpc64 processors support the new "light weight" sync (lwsync)
+ * So if the build is using -mcpu=[power4,power5,power5+,970] we can
+ * safely use lwsync.
+ */
+# define atomic_read_barrier() __asm ("lwsync" ::: "memory")
+/*
+ * "light weight" sync can also be used for the release barrier.
+ */
+# ifndef UP
+# define __ARCH_REL_INSTR "lwsync"
+# endif
+#else
+
 /*
  * Older powerpc32 processors don't support the new "light weight"
  * sync (lwsync). So the only safe option is to use normal sync
  * for all powerpc32 applications.
 */
 # define atomic_read_barrier() __asm__ ("sync" ::: "memory")
+#endif
 
 #endif
 
@@ -387,6 +403,13 @@ typedef uintmax_t uatomic_max_t;
 # endif
 #endif
 
+#ifndef MUTEX_HINT_ACQ
+# define MUTEX_HINT_ACQ
+#endif
+#ifndef MUTEX_HINT_REL
+# define MUTEX_HINT_REL
+#endif
+
 #define atomic_full_barrier() __asm__ ("sync" ::: "memory")
 #define atomic_write_barrier() __asm__ ("eieio" ::: "memory")
 
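
The atomic.h hunk selects lwsync for the read/release barrier only when the compiler guarantees a CPU that has it (_ARCH_PWR4 is predefined by GCC for -mcpu=power4 and newer); older cores fall back to a full sync. A small standalone sketch of how such barriers are typically used in a flag/payload handoff, mirroring the same #ifdef choice (illustrative usage, not code from this patch):

    /* barrier_demo.c - hypothetical consumer of the barriers defined above */
    #ifdef _ARCH_PWR4
    # define read_barrier()  __asm__ __volatile__ ("lwsync" ::: "memory")
    #else
    # define read_barrier()  __asm__ __volatile__ ("sync"   ::: "memory")
    #endif
    #define write_barrier()  __asm__ __volatile__ ("eieio"  ::: "memory")

    static int payload;
    static volatile int ready;

    void producer(void)
    {
        payload = 42;
        write_barrier();    /* make the payload visible before the flag */
        ready = 1;
    }

    int consumer(void)
    {
        while (!ready)
            ;               /* spin until the flag is observed */
        read_barrier();     /* order the flag read before the payload read */
        return payload;
    }
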
diff --git a/libc/sysdeps/linux/powerpc/bits/mathdef.h b/libc/sysdeps/linux/powerpc/bits/mathdef.h
index d6d35dda0..c64b8a395 100644
--- a/libc/sysdeps/linux/powerpc/bits/mathdef.h
+++ b/libc/sysdeps/linux/powerpc/bits/mathdef.h
@@ -46,12 +46,9 @@ typedef double double_t; /* `double' expressions are evaluated as
 #endif /* ISO C99 */
 
 #ifndef __NO_LONG_DOUBLE_MATH
-#include <bits/wordsize.h>
 /* Signal that we do not really have a `long double'.  The disables
    the declaration of all the `long double' function variants.  */
-# if __WORDSIZE == 32
+# if !defined __UCLIBC_HAS_LONG_DOUBLE_MATH__
 # define __NO_LONG_DOUBLE_MATH 1
-# elif !defined __UCLIBC_HAS_LONG_DOUBLE_MATH__
-# define __NO_LONG_DOUBLE_MATH 1
-# endif /* __WORDSIZE == 32 */
+# endif
 #endif /* __NO_LONG_DOUBLE_MATH */
diff --git a/libc/sysdeps/linux/powerpc/bits/syscalls.h b/libc/sysdeps/linux/powerpc/bits/syscalls.h
index f689c60ae..1c7b929b2 100644
--- a/libc/sysdeps/linux/powerpc/bits/syscalls.h
+++ b/libc/sysdeps/linux/powerpc/bits/syscalls.h
@@ -116,7 +116,7 @@
 	register long int r10 __asm__ ("r10"); \
 	register long int r11 __asm__ ("r11"); \
 	register long int r12 __asm__ ("r12"); \
-	LOADARGS_##nr (funcptr, args); \
+	LOAD_ARGS_##nr (funcptr, args); \
 	__asm__ __volatile__ \
 	  ("mtctr %0\n\t" \
 	   "bctrl\n\t" \
@@ -153,7 +153,7 @@
 	register long int r10 __asm__ ("r10"); \
 	register long int r11 __asm__ ("r11"); \
 	register long int r12 __asm__ ("r12"); \
-	LOADARGS_##nr(name, args); \
+	LOAD_ARGS_##nr(name, args); \
 	__asm__ __volatile__ \
 	  ("sc \n\t" \
 	   "mfcr %0" \
@@ -178,41 +178,41 @@
 extern void __illegally_sized_syscall_arg4(void);
 extern void __illegally_sized_syscall_arg5(void);
 extern void __illegally_sized_syscall_arg6(void);
-# define LOADARGS_0(name, dummy) \
+# define LOAD_ARGS_0(name, dummy) \
 	r0 = name
-# define LOADARGS_1(name, __arg1) \
+# define LOAD_ARGS_1(name, __arg1) \
 	long int arg1 = (long int) (__arg1); \
-	LOADARGS_0(name, 0); \
+	LOAD_ARGS_0(name, 0); \
 	if (__builtin_classify_type (__arg1) != 5 && sizeof (__arg1) > 4) \
 		__illegally_sized_syscall_arg1 (); \
 	r3 = arg1
-# define LOADARGS_2(name, __arg1, __arg2) \
+# define LOAD_ARGS_2(name, __arg1, __arg2) \
 	long int arg2 = (long int) (__arg2); \
-	LOADARGS_1(name, __arg1); \
+	LOAD_ARGS_1(name, __arg1); \
 	if (__builtin_classify_type (__arg2) != 5 && sizeof (__arg2) > 4) \
 		__illegally_sized_syscall_arg2 (); \
 	r4 = arg2
-# define LOADARGS_3(name, __arg1, __arg2, __arg3) \
+# define LOAD_ARGS_3(name, __arg1, __arg2, __arg3) \
 	long int arg3 = (long int) (__arg3); \
-	LOADARGS_2(name, __arg1, __arg2); \
+	LOAD_ARGS_2(name, __arg1, __arg2); \
 	if (__builtin_classify_type (__arg3) != 5 && sizeof (__arg3) > 4) \
 		__illegally_sized_syscall_arg3 (); \
 	r5 = arg3
-# define LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4) \
+# define LOAD_ARGS_4(name, __arg1, __arg2, __arg3, __arg4) \
 	long int arg4 = (long int) (__arg4); \
-	LOADARGS_3(name, __arg1, __arg2, __arg3); \
+	LOAD_ARGS_3(name, __arg1, __arg2, __arg3); \
 	if (__builtin_classify_type (__arg4) != 5 && sizeof (__arg4) > 4) \
 		__illegally_sized_syscall_arg4 (); \
 	r6 = arg4
-# define LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5) \
+# define LOAD_ARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5) \
 	long int arg5 = (long int) (__arg5); \
-	LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4); \
+	LOAD_ARGS_4(name, __arg1, __arg2, __arg3, __arg4); \
 	if (__builtin_classify_type (__arg5) != 5 && sizeof (__arg5) > 4) \
 		__illegally_sized_syscall_arg5 (); \
 	r7 = arg5
-# define LOADARGS_6(name, __arg1, __arg2, __arg3, __arg4, __arg5, __arg6) \
+# define LOAD_ARGS_6(name, __arg1, __arg2, __arg3, __arg4, __arg5, __arg6) \
 	long int arg6 = (long int) (__arg6); \
-	LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5); \
+	LOAD_ARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5); \
 	if (__builtin_classify_type (__arg6) != 5 && sizeof (__arg6) > 4) \
 		__illegally_sized_syscall_arg6 (); \
 	r8 = arg6
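
The LOADARGS_n to LOAD_ARGS_n rename only changes the macro names; the expansion is the same register-loading scheme. For reference, roughly what LOAD_ARGS_2 plus the "sc" template boil down to for a two-argument call, written out by hand (a simplified sketch: the real macros also size-check each argument and keep r5-r12 as explicit asm operands):

    /* Hand expansion for illustration; PowerPC32 only. */
    static inline long sketch_syscall2(long nr, long a1, long a2)
    {
        register long r0 __asm__ ("r0") = nr;  /* LOAD_ARGS_0: syscall number  */
        register long r3 __asm__ ("r3") = a1;  /* LOAD_ARGS_1: first argument  */
        register long r4 __asm__ ("r4") = a2;  /* LOAD_ARGS_2: second argument */

        __asm__ __volatile__ ("sc\n\t"
                              "mfcr %0"        /* copy CR so CR0.SO can be tested */
                              : "+r" (r0), "+r" (r3), "+r" (r4)
                              :
                              : "r5", "r6", "r7", "r8", "r9", "r10",
                                "r11", "r12", "cr0", "ctr", "memory");

        if (r0 & (1 << 28))   /* CR0.SO set: r3 holds an errno value */
            return -r3;       /* reported here as a negative return  */
        return r3;
    }
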
diff --git a/libc/sysdeps/linux/powerpc/bits/sysdep.h b/libc/sysdeps/linux/powerpc/bits/sysdep.h
deleted file mode 100644
index 478ebdd7a..000000000
--- a/libc/sysdeps/linux/powerpc/bits/sysdep.h
+++ /dev/null
@@ -1,301 +0,0 @@
-/* Copyright (C) 1992,1997-2003,2004,2005,2006 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, write to the Free
-   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
-   02111-1307 USA.  */
-
-#ifndef _LINUX_POWERPC_SYSDEP_H
-#define _LINUX_POWERPC_SYSDEP_H 1
-
-#include <sysdeps/unix/powerpc/sysdep.h>
-#include <tls.h>
-
-/* Some systen calls got renamed over time, but retained the same semantics.
-   Handle them here so they can be catched by both C and assembler stubs in
-   glibc.  */
-
-#ifdef __NR_pread64
-# ifdef __NR_pread
-# error "__NR_pread and __NR_pread64 both defined???"
-# endif
-# define __NR_pread __NR_pread64
-#endif
-
-#ifdef __NR_pwrite64
-# ifdef __NR_pwrite
-# error "__NR_pwrite and __NR_pwrite64 both defined???"
-# endif
-# define __NR_pwrite __NR_pwrite64
-#endif
-
-/* For Linux we can use the system call table in the header file
-   /usr/include/asm/unistd.h
-   of the kernel.  But these symbols do not follow the SYS_* syntax
-   so we have to redefine the `SYS_ify' macro here.  */
-#undef SYS_ify
-#ifdef __STDC__
-# define SYS_ify(syscall_name) __NR_##syscall_name
-#else
-# define SYS_ify(syscall_name) __NR_/**/syscall_name
-#endif
-
-#ifndef __ASSEMBLER__
-
-# include <errno.h>
-
-# ifdef SHARED
-# define INLINE_VSYSCALL(name, nr, args...) \
-  ({ \
-    __label__ out; \
-    __label__ iserr; \
-    INTERNAL_SYSCALL_DECL (sc_err); \
-    long int sc_ret; \
- \
-    if (__vdso_##name != NULL) \
-      { \
-        sc_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, sc_err, nr, ##args); \
-        if (!INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
-          goto out; \
-        if (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err) != ENOSYS) \
-          goto iserr; \
-      } \
- \
-    sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, ##args); \
-    if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
-      { \
-      iserr: \
-        __set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err)); \
-        sc_ret = -1L; \
-      } \
-  out: \
-    sc_ret; \
-  })
-# else
-# define INLINE_VSYSCALL(name, nr, args...) \
-  INLINE_SYSCALL (name, nr, ##args)
-# endif
-
-# ifdef SHARED
-# define INTERNAL_VSYSCALL(name, err, nr, args...) \
-  ({ \
-    __label__ out; \
-    long int v_ret; \
- \
-    if (__vdso_##name != NULL) \
-      { \
-        v_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, err, nr, ##args); \
-        if (!INTERNAL_SYSCALL_ERROR_P (v_ret, err) \
-            || INTERNAL_SYSCALL_ERRNO (v_ret, err) != ENOSYS) \
-          goto out; \
-      } \
-    v_ret = INTERNAL_SYSCALL (name, err, nr, ##args); \
-  out: \
-    v_ret; \
-  })
-# else
-# define INTERNAL_VSYSCALL(name, err, nr, args...) \
-  INTERNAL_SYSCALL (name, err, nr, ##args)
-# endif
-
-# define INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK(name, err, nr, args...) \
-  ({ \
-    long int sc_ret = ENOSYS; \
- \
-    if (__vdso_##name != NULL) \
-      sc_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, err, nr, ##args); \
-    else \
-      err = 1 << 28; \
-    sc_ret; \
-  })
-
-/* List of system calls which are supported as vsyscalls. */
-# define HAVE_CLOCK_GETRES_VSYSCALL 1
-# define HAVE_CLOCK_GETTIME_VSYSCALL 1
-
-/* Define a macro which expands inline into the wrapper code for a VDSO
-   call. This use is for internal calls that do not need to handle errors
-   normally. It will never touch errno.
-   On powerpc a system call basically clobbers the same registers like a
-   function call, with the exception of LR (which is needed for the
-   "sc; bnslr+" sequence) and CR (where only CR0.SO is clobbered to signal
-   an error return status). */
-# define INTERNAL_VSYSCALL_NCS(funcptr, err, nr, args...) \
-  ({ \
-    register void *r0 __asm__ ("r0"); \
-    register long int r3 __asm__ ("r3"); \
-    register long int r4 __asm__ ("r4"); \
-    register long int r5 __asm__ ("r5"); \
-    register long int r6 __asm__ ("r6"); \
-    register long int r7 __asm__ ("r7"); \
-    register long int r8 __asm__ ("r8"); \
-    register long int r9 __asm__ ("r9"); \
-    register long int r10 __asm__ ("r10"); \
-    register long int r11 __asm__ ("r11"); \
-    register long int r12 __asm__ ("r12"); \
-    LOADARGS_##nr (funcptr, args); \
-    __asm__ __volatile__ \
-      ("mtctr %0\n\t" \
-       "bctrl\n\t" \
-       "mfcr %0" \
-       : "=&r" (r0), \
-         "=&r" (r3), "=&r" (r4), "=&r" (r5), "=&r" (r6), "=&r" (r7), \
-         "=&r" (r8), "=&r" (r9), "=&r" (r10), "=&r" (r11), "=&r" (r12) \
-       : ASM_INPUT_##nr \
-       : "cr0", "ctr", "lr", "memory"); \
-    err = (long int) r0; \
-    (int) r3; \
-  })
-
-# undef INLINE_SYSCALL
-# define INLINE_SYSCALL(name, nr, args...) \
-  ({ \
-    INTERNAL_SYSCALL_DECL (sc_err); \
-    long int sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, args); \
-    if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
-      { \
-        __set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err)); \
-        sc_ret = -1L; \
-      } \
-    sc_ret; \
-  })
-
-/* Define a macro which expands inline into the wrapper code for a system
-   call. This use is for internal calls that do not need to handle errors
-   normally. It will never touch errno.
-   On powerpc a system call basically clobbers the same registers like a
-   function call, with the exception of LR (which is needed for the
-   "sc; bnslr+" sequence) and CR (where only CR0.SO is clobbered to signal
-   an error return status). */
-
-# undef INTERNAL_SYSCALL_DECL
-# define INTERNAL_SYSCALL_DECL(err) long int err
-
-# define INTERNAL_SYSCALL_NCS(name, err, nr, args...) \
-  ({ \
-    register long int r0 __asm__ ("r0"); \
-    register long int r3 __asm__ ("r3"); \
-    register long int r4 __asm__ ("r4"); \
-    register long int r5 __asm__ ("r5"); \
-    register long int r6 __asm__ ("r6"); \
-    register long int r7 __asm__ ("r7"); \
-    register long int r8 __asm__ ("r8"); \
-    register long int r9 __asm__ ("r9"); \
-    register long int r10 __asm__ ("r10"); \
-    register long int r11 __asm__ ("r11"); \
-    register long int r12 __asm__ ("r12"); \
-    LOADARGS_##nr(name, args); \
-    __asm__ __volatile__ \
-      ("sc \n\t" \
-       "mfcr %0" \
-       : "=&r" (r0), \
-         "=&r" (r3), "=&r" (r4), "=&r" (r5), "=&r" (r6), "=&r" (r7), \
-         "=&r" (r8), "=&r" (r9), "=&r" (r10), "=&r" (r11), "=&r" (r12) \
-       : ASM_INPUT_##nr \
-       : "cr0", "ctr", "memory"); \
-    err = r0; \
-    (int) r3; \
-  })
-# undef INTERNAL_SYSCALL
-# define INTERNAL_SYSCALL(name, err, nr, args...) \
-  INTERNAL_SYSCALL_NCS (__NR_##name, err, nr, ##args)
-
-# undef INTERNAL_SYSCALL_ERROR_P
-# define INTERNAL_SYSCALL_ERROR_P(val, err) \
-  ((void) (val), __builtin_expect ((err) & (1 << 28), 0))
-
-# undef INTERNAL_SYSCALL_ERRNO
-# define INTERNAL_SYSCALL_ERRNO(val, err) (val)
-
-# define LOADARGS_0(name, dummy) \
-  r0 = name
-# define LOADARGS_1(name, __arg1) \
-  long int arg1 = (long int) (__arg1); \
-  LOADARGS_0(name, 0); \
-  extern void __illegally_sized_syscall_arg1 (void); \
-  if (__builtin_classify_type (__arg1) != 5 && sizeof (__arg1) > 4) \
-    __illegally_sized_syscall_arg1 (); \
-  r3 = arg1
-# define LOADARGS_2(name, __arg1, __arg2) \
-  long int arg2 = (long int) (__arg2); \
-  LOADARGS_1(name, __arg1); \
-  extern void __illegally_sized_syscall_arg2 (void); \
-  if (__builtin_classify_type (__arg2) != 5 && sizeof (__arg2) > 4) \
-    __illegally_sized_syscall_arg2 (); \
-  r4 = arg2
-# define LOADARGS_3(name, __arg1, __arg2, __arg3) \
-  long int arg3 = (long int) (__arg3); \
-  LOADARGS_2(name, __arg1, __arg2); \
-  extern void __illegally_sized_syscall_arg3 (void); \
-  if (__builtin_classify_type (__arg3) != 5 && sizeof (__arg3) > 4) \
-    __illegally_sized_syscall_arg3 (); \
-  r5 = arg3
-# define LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4) \
-  long int arg4 = (long int) (__arg4); \
-  LOADARGS_3(name, __arg1, __arg2, __arg3); \
-  extern void __illegally_sized_syscall_arg4 (void); \
-  if (__builtin_classify_type (__arg4) != 5 && sizeof (__arg4) > 4) \
-    __illegally_sized_syscall_arg4 (); \
-  r6 = arg4
-# define LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5) \
-  long int arg5 = (long int) (__arg5); \
-  LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4); \
-  extern void __illegally_sized_syscall_arg5 (void); \
-  if (__builtin_classify_type (__arg5) != 5 && sizeof (__arg5) > 4) \
-    __illegally_sized_syscall_arg5 (); \
-  r7 = arg5
-# define LOADARGS_6(name, __arg1, __arg2, __arg3, __arg4, __arg5, __arg6) \
-  long int arg6 = (long int) (__arg6); \
-  LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5); \
-  extern void __illegally_sized_syscall_arg6 (void); \
-  if (__builtin_classify_type (__arg6) != 5 && sizeof (__arg6) > 4) \
-    __illegally_sized_syscall_arg6 (); \
-  r8 = arg6
-
-# define ASM_INPUT_0 "0" (r0)
-# define ASM_INPUT_1 ASM_INPUT_0, "1" (r3)
-# define ASM_INPUT_2 ASM_INPUT_1, "2" (r4)
-# define ASM_INPUT_3 ASM_INPUT_2, "3" (r5)
-# define ASM_INPUT_4 ASM_INPUT_3, "4" (r6)
-# define ASM_INPUT_5 ASM_INPUT_4, "5" (r7)
-# define ASM_INPUT_6 ASM_INPUT_5, "6" (r8)
-
-#endif /* __ASSEMBLER__ */
-
-
-/* Pointer mangling support. */
-#if defined NOT_IN_libc && defined IS_IN_rtld
-/* We cannot use the thread descriptor because in ld.so we use setjmp
-   earlier than the descriptor is initialized. */
-#else
-# ifdef __ASSEMBLER__
-# define PTR_MANGLE(reg, tmpreg) \
-	lwz	tmpreg,POINTER_GUARD(r2); \
-	xor	reg,tmpreg,reg
-# define PTR_MANGLE2(reg, tmpreg) \
-	xor	reg,tmpreg,reg
-# define