-rw-r--r--   ldso/ldso/powerpc/boot1_arch.h    23
-rw-r--r--   ldso/ldso/powerpc/dl-startup.h    23
-rw-r--r--   ldso/ldso/powerpc/dl-syscalls.h  243
-rw-r--r--   ldso/ldso/powerpc/dl-sysdep.h    136
-rw-r--r--   ldso/ldso/powerpc/elfinterp.c    485
-rw-r--r--   ldso/ldso/powerpc/ld_syscalls.h  243
-rw-r--r--   ldso/ldso/powerpc/ld_sysdep.h    136
-rw-r--r--   ldso/ldso/powerpc/resolve.S       82
-rw-r--r--   ldso/ldso/powerpc/syscalls.h     243
-rw-r--r--   ldso/ldso/powerpc/sysdep.h       136
10 files changed, 1750 insertions, 0 deletions
diff --git a/ldso/ldso/powerpc/boot1_arch.h b/ldso/ldso/powerpc/boot1_arch.h
new file mode 100644
index 000000000..30fd7542a
--- /dev/null
+++ b/ldso/ldso/powerpc/boot1_arch.h
@@ -0,0 +1,23 @@
+/* Any assembly language/system dependent hacks needed to set up boot1.c so it
+ * will work as expected and cope with whatever platform specific weirdness is
+ * needed for this architecture. */
+
+/* Override the default _dl_boot function and replace it with a bit of asm.
+ * Then call the real _dl_boot function, which is now named _dl_boot2. */
+
+asm("\
+.text\n\
+.globl _dl_boot\n\
+_dl_boot:\n\
+	addi 3,1,4\n\
+\n\
+	bl _dl_boot2\n\
+\n\
+	li 0,0\n\
+	lwz 0,42(0)\n\
+.previous\n\
+");
+
+#define _dl_boot _dl_boot2
+#define DL_BOOT(X) static void * __attribute__ ((unused)) _dl_boot (X)
+
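For reference, here is a minimal sketch of how the two defines above compose; the parameter name and body are illustrative assumptions, not part of the commit:

#define _dl_boot _dl_boot2
#define DL_BOOT(X) static void * __attribute__ ((unused)) _dl_boot (X)

/* DL_BOOT(unsigned long args) expands, via the rename above, to
 *   static void * __attribute__ ((unused)) _dl_boot2 (unsigned long args)
 * so the C entry point becomes _dl_boot2, while the asm stub keeps the
 * _dl_boot symbol and reaches the C code with "bl _dl_boot2". */
DL_BOOT(unsigned long args)
{
	(void) args;	/* the real loader does its relocation work here */
	return 0;
}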
diff --git a/ldso/ldso/powerpc/dl-startup.h b/ldso/ldso/powerpc/dl-startup.h
new file mode 100644
index 000000000..30fd7542a
--- /dev/null
+++ b/ldso/ldso/powerpc/dl-startup.h
@@ -0,0 +1,23 @@
+/* Any assembly language/system dependent hacks needed to set up boot1.c so it
+ * will work as expected and cope with whatever platform specific weirdness is
+ * needed for this architecture. */
+
+/* Override the default _dl_boot function and replace it with a bit of asm.
+ * Then call the real _dl_boot function, which is now named _dl_boot2. */
+
+asm("\
+.text\n\
+.globl _dl_boot\n\
+_dl_boot:\n\
+	addi 3,1,4\n\
+\n\
+	bl _dl_boot2\n\
+\n\
+	li 0,0\n\
+	lwz 0,42(0)\n\
+.previous\n\
+");
+
+#define _dl_boot _dl_boot2
+#define DL_BOOT(X) static void * __attribute__ ((unused)) _dl_boot (X)
+
diff --git a/ldso/ldso/powerpc/dl-syscalls.h b/ldso/ldso/powerpc/dl-syscalls.h
new file mode 100644
index 000000000..ae37aa822
--- /dev/null
+++ b/ldso/ldso/powerpc/dl-syscalls.h
@@ -0,0 +1,243 @@
+#include <sys/types.h>
+
+/*
+ * This file contains the system call macros and syscall
+ * numbers used by the shared library loader.
+ */
+
+#define __NR_exit 1
+#define __NR_read 3
+#define __NR_write 4
+#define __NR_open 5
+#define __NR_close 6
+#define __NR_getuid 24
+#define __NR_geteuid 49
+#define __NR_getgid 47
+#define __NR_getegid 50
+#define __NR_mmap 90
+#define __NR_munmap 91
+#define __NR_stat 106
+#define __NR_mprotect 125
+
+/* Here are the macros which define how this platform makes
+ * system calls. This particular variant does _not_ set
+ * errno (note how it is disabled in __syscall_return) since
+ * these will get called before the errno symbol is dynamically
+ * linked. */
+
+#undef __syscall_return
+#define __syscall_return(type) \
+ return (__sc_err & 0x10000000 ? /*errno = __sc_ret,*/ __sc_ret = -1 : 0), \
+ (type) __sc_ret
+
+#undef __syscall_clobbers
+#define __syscall_clobbers \
+ "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12"
+
+#undef _syscall0
+#define _syscall0(type,name) \
+type name(void) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+#undef _syscall1
+#define _syscall1(type,name,type1,arg1) \
+type name(type1 arg1) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ \
+ __sc_3 = (unsigned long) (arg1); \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+#undef _syscall2
+#define _syscall2(type,name,type1,arg1,type2,arg2) \
+type name(type1 arg1, type2 arg2) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ register unsigned long __sc_4 __asm__ ("r4"); \
+ \
+ __sc_3 = (unsigned long) (arg1); \
+ __sc_4 = (unsigned long) (arg2); \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0), \
+ "r" (__sc_4) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+#undef _syscall3
+#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
+type name(type1 arg1, type2 arg2, type3 arg3) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ register unsigned long __sc_4 __asm__ ("r4"); \
+ register unsigned long __sc_5 __asm__ ("r5"); \
+ \
+ __sc_3 = (unsigned long) (arg1); \
+ __sc_4 = (unsigned long) (arg2); \
+ __sc_5 = (unsigned long) (arg3); \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0), \
+ "r" (__sc_4), \
+ "r" (__sc_5) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+#undef _syscall4
+#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
+type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ register unsigned long __sc_4 __asm__ ("r4"); \
+ register unsigned long __sc_5 __asm__ ("r5"); \
+ register unsigned long __sc_6 __asm__ ("r6"); \
+ \
+ __sc_3 = (unsigned long) (arg1); \
+ __sc_4 = (unsigned long) (arg2); \
+ __sc_5 = (unsigned long) (arg3); \
+ __sc_6 = (unsigned long) (arg4); \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0), \
+ "r" (__sc_4), \
+ "r" (__sc_5), \
+ "r" (__sc_6) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+#undef _syscall5
+#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
+type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ register unsigned long __sc_4 __asm__ ("r4"); \
+ register unsigned long __sc_5 __asm__ ("r5"); \
+ register unsigned long __sc_6 __asm__ ("r6"); \
+ register unsigned long __sc_7 __asm__ ("r7"); \
+ \
+ __sc_3 = (unsigned long) (arg1); \
+ __sc_4 = (unsigned long) (arg2); \
+ __sc_5 = (unsigned long) (arg3); \
+ __sc_6 = (unsigned long) (arg4); \
+ __sc_7 = (unsigned long) (arg5); \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0), \
+ "r" (__sc_4), \
+ "r" (__sc_5), \
+ "r" (__sc_6), \
+ "r" (__sc_7) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+
+#undef _syscall6
+#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
+type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ register unsigned long __sc_4 __asm__ ("r4"); \
+ register unsigned long __sc_5 __asm__ ("r5"); \
+ register unsigned long __sc_6 __asm__ ("r6"); \
+ register unsigned long __sc_7 __asm__ ("r7"); \
+ register unsigned long __sc_8 __asm__ ("r8"); \
+ \
+ __sc_3 = (unsigned long) (arg1); \
+ __sc_4 = (unsigned long) (arg2); \
+ __sc_5 = (unsigned long) (arg3); \
+ __sc_6 = (unsigned long) (arg4); \
+ __sc_7 = (unsigned long) (arg5); \
+ __sc_8 = (unsigned long) (arg6); \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0), \
+ "r" (__sc_4), \
+ "r" (__sc_5), \
+ "r" (__sc_6), \
+ "r" (__sc_7), \
+ "r" (__sc_8) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+
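The _syscallN macros above are instantiated by the loader to obtain freestanding syscall wrappers that work before errno (or any of libc) is available. A hedged usage sketch, assuming the macros and __NR_* numbers above are in scope; the particular wrappers shown are illustrative:

#include <sys/types.h>

/* Each line expands to a full function definition, e.g. "int close(int fd)",
 * that loads r0 with the syscall number, traps with "sc", and returns -1
 * (without touching errno) when the kernel flags an error in cr0 - the
 * 0x10000000 test in __syscall_return above. */
_syscall1(int, close, int, fd)
_syscall3(ssize_t, read, int, fd, void *, buf, size_t, count)
_syscall3(ssize_t, write, int, fd, const void *, buf, size_t, count)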
diff --git a/ldso/ldso/powerpc/dl-sysdep.h b/ldso/ldso/powerpc/dl-sysdep.h
new file mode 100644
index 000000000..16b853ce3
--- /dev/null
+++ b/ldso/ldso/powerpc/dl-sysdep.h
@@ -0,0 +1,136 @@
+/*
+ * Various assembly language/system dependent hacks that are required
+ * so that we can minimize the amount of platform specific code.
+ */
+
+/*
+ * Define this if the system uses RELOCA.
+ */
+#define ELF_USES_RELOCA
+
+/*
+ * Get a pointer to the argv array. On many platforms this can be just
+ * the address of the first argument, on other platforms we need to
+ * do something a little more subtle here.
+ */
+#define GET_ARGV(ARGVP, ARGS) ARGVP = ((unsigned long*) ARGS)
+
+/*
+ * Initialization sequence for a GOT.
+ */
+#define INIT_GOT(GOT_BASE,MODULE) _dl_init_got(GOT_BASE,MODULE)
+
+/* Stuff for the PLT. */
+#define PLT_INITIAL_ENTRY_WORDS 18
+#define PLT_LONGBRANCH_ENTRY_WORDS 0
+#define PLT_TRAMPOLINE_ENTRY_WORDS 6
+#define PLT_DOUBLE_SIZE (1<<13)
+#define PLT_ENTRY_START_WORDS(entry_number) \
+ (PLT_INITIAL_ENTRY_WORDS + (entry_number)*2 \
+ + ((entry_number) > PLT_DOUBLE_SIZE \
+ ? ((entry_number) - PLT_DOUBLE_SIZE)*2 \
+ : 0))
+#define PLT_DATA_START_WORDS(num_entries) PLT_ENTRY_START_WORDS(num_entries)
+
+/* Macros to build PowerPC opcode words. */
+#define OPCODE_ADDI(rd,ra,simm) \
+ (0x38000000 | (rd) << 21 | (ra) << 16 | ((simm) & 0xffff))
+#define OPCODE_ADDIS(rd,ra,simm) \
+ (0x3c000000 | (rd) << 21 | (ra) << 16 | ((simm) & 0xffff))
+#define OPCODE_ADD(rd,ra,rb) \
+ (0x7c000214 | (rd) << 21 | (ra) << 16 | (rb) << 11)
+#define OPCODE_B(target) (0x48000000 | ((target) & 0x03fffffc))
+#define OPCODE_BA(target) (0x48000002 | ((target) & 0x03fffffc))
+#define OPCODE_BCTR() 0x4e800420
+#define OPCODE_LWZ(rd,d,ra) \
+ (0x80000000 | (rd) << 21 | (ra) << 16 | ((d) & 0xffff))
+#define OPCODE_LWZU(rd,d,ra) \
+ (0x84000000 | (rd) << 21 | (ra) << 16 | ((d) & 0xffff))
+#define OPCODE_MTCTR(rd) (0x7C0903A6 | (rd) << 21)
+#define OPCODE_RLWINM(ra,rs,sh,mb,me) \
+ (0x54000000 | (rs) << 21 | (ra) << 16 | (sh) << 11 | (mb) << 6 | (me) << 1)
+
+#define OPCODE_LI(rd,simm) OPCODE_ADDI(rd,0,simm)
+#define OPCODE_ADDIS_HI(rd,ra,value) \
+ OPCODE_ADDIS(rd,ra,((value) + 0x8000) >> 16)
+#define OPCODE_LIS_HI(rd,value) OPCODE_ADDIS_HI(rd,0,value)
+#define OPCODE_SLWI(ra,rs,sh) OPCODE_RLWINM(ra,rs,sh,0,31-sh)
+
+
+#define PPC_DCBST(where) asm volatile ("dcbst 0,%0" : : "r"(where) : "memory")
+#define PPC_SYNC asm volatile ("sync" : : : "memory")
+#define PPC_ISYNC asm volatile ("sync; isync" : : : "memory")
+#define PPC_ICBI(where) asm volatile ("icbi 0,%0" : : "r"(where) : "memory")
+#define PPC_DIE asm volatile ("tweq 0,0")
+
+/*
+ * Here is a macro to perform a relocation. This is only used when
+ * bootstrapping the dynamic loader. RELP is the relocation that we
+ * are performing, REL is the pointer to the address we are relocating.
+ * SYMBOL is the symbol involved in the relocation, and LOAD is the
+ * load address.
+ */
+// finaladdr = LOAD ?
+#define PERFORM_BOOTSTRAP_RELOC(RELP,REL,SYMBOL,LOAD) \
+ {int type=ELF32_R_TYPE((RELP)->r_info); \
+ if(type==R_PPC_NONE){ \
+ }else if(type==R_PPC_ADDR32){ \
+ *REL += (SYMBOL); \
+ }else if(type==R_PPC_RELATIVE){ \
+ *REL = (Elf32_Word)(LOAD) + (RELP)->r_addend; \
+ }else if(type==R_PPC_REL24){ \
+ Elf32_Sword delta = (Elf32_Word)(SYMBOL) - (Elf32_Word)(REL); \
+ *REL &= 0xfc000003; \
+ *REL |= (delta & 0x03fffffc); \
+ }else if(type==R_PPC_JMP_SLOT){ \
+ Elf32_Sword delta = (Elf32_Word)(SYMBOL) - (Elf32_Word)(REL); \
+ /*if (delta << 6 >> 6 != delta)_dl_exit(99);*/ \
+ *REL = OPCODE_B(delta); \
+ }else{ \
+ _dl_exit(100+ELF32_R_TYPE((RELP)->r_info)); \
+ } \
+/*hexprint(*REL);*/ \
+ PPC_DCBST(REL); PPC_SYNC; PPC_ICBI(REL); \
+ }
+
+#if 0
+ case R_386_32: \
+ *REL += SYMBOL; \
+ break; \
+ case R_386_PC32: \
+ *REL += SYMBOL - (unsigned long) REL; \
+ break; \
+ case R_386_GLOB_DAT: \
+ case R_386_JMP_SLOT: \
+ *REL = SYMBOL; \
+ break; \
+ case R_386_RELATIVE: \
+ *REL += (unsigned long) LOAD; \
+ break;
+#endif
+
+/*
+ * Transfer control to the user's application, once the dynamic loader
+ * is done. This routine has to exit the current function, then
+ * call the _dl_elf_main function.
+ */
+#define START() \
+ __asm__ volatile ("mtlr %0\n\t" \
+ "blrl\n\t" \
+ : "=r" (status) : "r" (_dl_elf_main))
+
+
+
+/* Here we define the magic numbers that this dynamic loader should accept */
+
+#define MAGIC1 EM_PPC
+#undef MAGIC2
+/* Used for error messages */
+#define ELF_TARGET "powerpc"
+
+struct elf_resolve;
+extern unsigned long _dl_linux_resolver(struct elf_resolve * tpnt, int reloc_entry);
+void _dl_init_got(unsigned long *lpnt,struct elf_resolve *tpnt);
+
+
+#define do_rem(result, n, base) result = (n % base)
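The OPCODE_* builders above let the loader assemble PowerPC instruction words at runtime when it writes PLT stubs. A small standalone sanity check of the encodings (the two builders are copied here so the snippet compiles on its own; the expected values can be confirmed with any PowerPC disassembler):

#include <stdio.h>

#define OPCODE_ADDI(rd,ra,simm) \
	(0x38000000 | (rd) << 21 | (ra) << 16 | ((simm) & 0xffff))
#define OPCODE_LI(rd,simm)	OPCODE_ADDI(rd,0,simm)
#define OPCODE_B(target)	(0x48000000 | ((target) & 0x03fffffc))

int main(void)
{
	/* "li 11,16" - load the immediate 16 into r11 - encodes as 0x39600010. */
	printf("li 11,16 -> 0x%08x\n", (unsigned int) OPCODE_LI(11, 16));
	/* An unconditional branch 8 bytes forward encodes as 0x48000008. */
	printf("b  .+8   -> 0x%08x\n", (unsigned int) OPCODE_B(8));
	return 0;
}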
diff --git a/ldso/ldso/powerpc/elfinterp.c b/ldso/ldso/powerpc/elfinterp.c
new file mode 100644
index 000000000..81b10c778
--- /dev/null
+++ b/ldso/ldso/powerpc/elfinterp.c
@@ -0,0 +1,485 @@
+/* Run an ELF binary on a linux system.
+
+ Copyright (C) 1993, Eric Youngdale.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#ifndef VERBOSE_DLINKER
+#define VERBOSE_DLINKER
+#endif
+#ifdef VERBOSE_DLINKER
+static char *_dl_reltypes[] =
+ { "R_PPC_NONE", "R_PPC_ADDR32", "R_PPC_ADDR24", "R_PPC_ADDR16",
+ "R_PPC_ADDR16_LO", "R_PPC_ADDR16_HI", "R_PPC_ADDR16_HA",
+ "R_PPC_ADDR14", "R_PPC_ADDR14_BRTAKEN", "R_PPC_ADDR14_BRNTAKEN",
+ "R_PPC_REL24", "R_PPC_REL14", "R_PPC_REL14_BRTAKEN",
+ "R_PPC_REL14_BRNTAKEN", "R_PPC_GOT16", "R_PPC_GOT16_LO",
+ "R_PPC_GOT16_HI", "R_PPC_GOT16_HA", "R_PPC_PLTREL24",
+ "R_PPC_COPY", "R_PPC_GLOB_DAT", "R_PPC_JMP_SLOT", "R_PPC_RELATIVE",
+ "R_PPC_LOCAL24PC", "R_PPC_UADDR32", "R_PPC_UADDR16", "R_PPC_REL32",
+ "R_PPC_PLT32", "R_PPC_PLTREL32", "R_PPC_PLT16_LO", "R_PPC_PLT16_HI",
+ "R_PPC_PLT16_HA", "R_PPC_SDAREL16", "R_PPC_SECTOFF",
+ "R_PPC_SECTOFF_LO", "R_PPC_SECTOFF_HI", "R_PPC_SECTOFF_HA",
+};
+#define N_RELTYPES (sizeof(_dl_reltypes)/sizeof(_dl_reltypes[0]))
+#endif
+
+/* Program to load an ELF binary on a linux system, and run it.
+ References to symbols in sharable libraries can be resolved by either
+ an ELF sharable library or a linux style of shared library. */
+
+/* Disclaimer: I have never seen any AT&T source code for SVr4, nor have
+ I ever taken any courses on internals. This program was developed using
+ information available through the book "UNIX SYSTEM V RELEASE 4,
+ Programmers guide: Ansi C and Programming Support Tools", which did
+ a more than adequate job of explaining everything required to get this
+ working. */
+
+#include <sys/types.h>
+#include <errno.h>
+#include "sysdep.h"
+#include <elf.h>
+#include "linuxelf.h"
+#include "hash.h"
+#include "syscall.h"
+#include "string.h"
+
+extern char *_dl_progname;
+
+extern int _dl_linux_resolve(void);
+
+void _dl_init_got(unsigned long *plt,struct elf_resolve *tpnt)
+{
+ int i;
+ unsigned long target_addr = (unsigned long)_dl_linux_resolve;
+ unsigned int n_plt_entries;
+ unsigned long *tramp;
+ unsigned long data_words;
+ unsigned int rel_offset_words;
+ unsigned int offset;
+
+ _dl_fdprintf(2,"init_got plt=%08lx, tpnt=%08lx\n",
+ (unsigned long)plt,(unsigned long)tpnt);
+
+ n_plt_entries = tpnt->dynamic_info[DT_PLTRELSZ] / sizeof(ELF_RELOC);
+_dl_fdprintf(2,"n_plt_entries %d\n",n_plt_entries);
+
+rel_offset_words = PLT_DATA_START_WORDS(n_plt_entries);
+_dl_fdprintf(2,"rel_offset_words %08x\n",rel_offset_words);
+data_words = (unsigned long)(plt + rel_offset_words);
+_dl_fdprintf(2,"data_words %08x\n",data_words);
+
+ //lpnt += PLT_INITIAL_ENTRY_WORDS;
+
+ plt[PLT_LONGBRANCH_ENTRY_WORDS] = OPCODE_ADDIS_HI(11, 11, data_words);
+ plt[PLT_LONGBRANCH_ENTRY_WORDS+1] = OPCODE_LWZ(11,data_words,11);
+
+ plt[PLT_LONGBRANCH_ENTRY_WORDS+2] = OPCODE_MTCTR(11);
+ plt[PLT_LONGBRANCH_ENTRY_WORDS+3] = OPCODE_BCTR();
+
+ tramp = plt + PLT_TRAMPOLINE_ENTRY_WORDS;
+ tramp[0] = OPCODE_ADDIS_HI(11,11,-data_words);
+ tramp[1] = OPCODE_ADDI(11,11,-data_words);
+ tramp[2] = OPCODE_SLWI(12,11,1);
+ tramp[3] = OPCODE_ADD(11,12,11);
+ tramp[4] = OPCODE_LI(12,target_addr);
+ tramp[5] = OPCODE_ADDIS_HI(12,12,target_addr);
+ tramp[6] = OPCODE_MTCTR(12);
+ tramp[7] = OPCODE_LI(12,(unsigned long)tpnt);
+ tramp[8] = OPCODE_ADDIS_HI(12,12,(unsigned long)tpnt);
+ tramp[9] = OPCODE_BCTR();
+
+#if 0
+ offset = PLT_INITIAL_ENTRY_WORDS;
+ i = 0;
+ if(n_plt_entries >= PLT_DOUBLE_SIZE){
+ _dl_fdprintf(2,"PLT table too large (%d>=%d)\n",
+ n_plt_entries,PLT_DOUBLE_SIZE);
+ _dl_exit(1);
+ }
+ for(i=0;i<n_plt_entries;i++){
+ plt[offset] = OPCODE_LI (11,i*4);
+ plt[offset+1] = OPCODE_B ((PLT_TRAMPOLINE_ENTRY_WORDS + 2
+ - (offset + 1)) *4);
+ offset+=2;
+ }
+
+ for(i=0;i<rel_offset_words;i+=4){
+_dl_fdprintf(2,"%d %08lx\n",i,(unsigned long)(plt+i));
+ //PPC_DCBST(plt+i);
+ //PPC_SYNC;
+ //PPC_ICBI(plt+i);
+ }
+#if 0
+ PPC_DCBST(plt + rel_offset_words - 1);
+ PPC_SYNC;
+ PPC_ICBI(plt);
+ PPC_ICBI(plt+rel_offset_words-1);
+#endif
+ //PPC_ISYNC;
+#endif
+}
+
+unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry)
+{
+ int reloc_type;
+ ELF_RELOC *this_reloc;
+ char *strtab;
+ Elf32_Sym *symtab;
+ ELF_RELOC *rel_addr;
+ int symtab_index;
+ char *new_addr;
+ char **got_addr;
+ unsigned long instr_addr;
+
+_dl_fdprintf(2,"linux_resolver tpnt=%08x reloc_entry=%08x\n",tpnt,reloc_entry);
+ rel_addr = (ELF_RELOC *) (tpnt->dynamic_info[DT_JMPREL] + tpnt->loadaddr);
+
+ this_reloc = (void *)rel_addr + reloc_entry;
+ reloc_type = ELF32_R_TYPE(this_reloc->r_info);
+ symtab_index = ELF32_R_SYM(this_reloc->r_info);
+
+ symtab = (Elf32_Sym *) (tpnt->dynamic_info[DT_SYMTAB] + tpnt->loadaddr);
+ strtab = (char *) (tpnt->dynamic_info[DT_STRTAB] + tpnt->loadaddr);
+
+
+ if (reloc_type != R_PPC_JMP_SLOT) {
+ _dl_fdprintf(2, "%s: Incorrect relocation type [%s] in jump relocations\n",
+ _dl_progname,
+ (reloc_type<N_RELTYPES)?_dl_reltypes[reloc_type]:"unknown");
+ _dl_exit(1);
+ };
+
+	/* Address of the jump instruction to fix up */
+ instr_addr = ((unsigned long) this_reloc->r_offset +
+ (unsigned long) tpnt->loadaddr);
+ got_addr = (char **) instr_addr;
+
+//#ifdef DEBUG
+ _dl_fdprintf(2, "Resolving symbol %s\n",
+ strtab + symtab[symtab_index].st_name);
+//#endif
+
+ /* Get the address of the GOT entry */
+ new_addr = _dl_find_hash(strtab + symtab[symtab_index].st_name,
+ tpnt->symbol_scope, (unsigned long) got_addr, tpnt, 0);
+ if (!new_addr) {
+ _dl_fdprintf(2, "%s: can't resolve symbol '%s'\n",
+ _dl_progname, strtab + symtab[symtab_index].st_name);
+ _dl_exit(1);
+ };
+/* #define DEBUG_LIBRARY */
+#ifdef DEBUG_LIBRARY
+ if ((unsigned long) got_addr < 0x40000000) {
+ _dl_fdprintf(2, "Calling library function: %s\n",
+ strtab + symtab[symtab_index].st_name);
+ } else {
+ *got_addr = new_addr;
+ }
+#else
+ *got_addr = new_addr;
+#endif
+ return (unsigned long) new_addr;
+}
+
+void _dl_parse_lazy_relocation_information(struct elf_resolve *tpnt,
+ unsigned long rel_addr, unsigned long rel_size, int type)
+{
+ int i;
+ char *strtab;
+ int reloc_type;
+ int symtab_index;
+ Elf32_Sym *symtab;
+ ELF_RELOC *rpnt;
+ unsigned long *reloc_addr;
+ unsigned long *plt;
+ int index;
+
+_dl_fdprintf(2,"parse_lazy tpnt=%08x rel_addr=%08x rel_size=%08x, type=%d\n",
+ tpnt,rel_addr,rel_size,type);
+ /* Now parse the relocation information */
+ rpnt = (ELF_RELOC *) (rel_addr + tpnt->loadaddr);
+ rel_size = rel_size / sizeof(ELF_RELOC);
+
+ symtab =
+ (Elf32_Sym *) (tpnt->dynamic_info[DT_SYMTAB] + tpnt->loadaddr);
+ strtab = (char *) (tpnt->dynamic_info[DT_STRTAB] + tpnt->loadaddr);
+ plt = (unsigned long *)(tpnt->dynamic_info[DT_PLTGOT] + tpnt->loadaddr);
+
+ for (i = 0; i < rel_size; i++, rpnt++) {
+ reloc_addr = (unsigned long *) (tpnt->loadaddr + (unsigned long) rpnt->r_offset);
+ reloc_type = ELF32_R_TYPE(rpnt->r_info);
+ symtab_index = ELF32_R_SYM(rpnt->r_info);
+
+ /* When the dynamic linker bootstrapped itself, it resolved some symbols.
+ Make sure we do not do them again */
+ if (!symtab_index && tpnt->libtype == program_interpreter)
+ continue;
+ if (symtab_index && tpnt->libtype == program_interpreter &&
+ _dl_symbol(strtab + symtab[symtab_index].st_name))
+ continue;
+
+#if 0
+_dl_fdprintf(2, "(lazy) resolving ");
+ if (symtab_index)
+ _dl_fdprintf(2, "'%s'\n", strtab + symtab[symtab_index].st_name);
+_dl_fdprintf(2, "reloc_addr %08x addr %08x old %08x\n", reloc_addr, symtab[symtab_index].st_value, *reloc_addr);
+_dl_fdprintf(2, "plt %08x\n",(unsigned long)plt);
+#endif
+
+
+ switch (reloc_type) {
+ case R_PPC_NONE:
+ break;
+ case R_PPC_JMP_SLOT:
+ {
+ int delta;
+
+ delta = (unsigned long)(plt+PLT_TRAMPOLINE_ENTRY_WORDS+2)
+ - (unsigned long)(reloc_addr+1);
+
+ index = ((unsigned long)reloc_addr -
+ (unsigned long)(plt+PLT_INITIAL_ENTRY_WORDS))
+ /sizeof(unsigned long);
+ index /= 2;
+//_dl_fdprintf(2, "index %08x\n",index);
+//_dl_fdprintf(2, "delta %08x\n",delta);
+ reloc_addr[0] = OPCODE_LI(11,index*4);
+ reloc_addr[1] = OPCODE_B(delta);
+ break;
+ }
+ default:
+ _dl_fdprintf(2, "%s: (LAZY) can't handle reloc type ",
+ _dl_progname);
+#ifdef VERBOSE_DLINKER
+ _dl_fdprintf(2, "%s ", _dl_reltypes[reloc_type]);
+#endif
+ if (symtab_index)
+ _dl_fdprintf(2, "'%s'\n", strtab + symtab[symtab_index].st_name);
+ _dl_exit(1);
+ };
+
+ /* instructions were modified */
+ PPC_DCBST(reloc_addr);
+ PPC_SYNC;
+ PPC_ICBI(reloc_addr);
+ };
+}
+
+int _dl_parse_relocation_information(struct elf_resolve *tpnt,
+ unsigned long rel_addr, unsigned long rel_size, int type)
+{
+ int i;
+ char *strtab;
+ int reloc_type;
+ int goof = 0;
+ Elf32_Sym *symtab;
+ ELF_RELOC *rpnt;
+ unsigned long *reloc_addr;
+ unsigned long symbol_addr;
+ int symtab_index;
+ unsigned long addend;
+ unsigned long *plt;
+
+//_dl_fdprintf(2,"parse_reloc tpnt=%08x rel_addr=%08x rel_size=%08x, type=%d\n",
+// tpnt,rel_addr,rel_size,type);
+ /* Now parse the relocation information */
+
+ rpnt = (ELF_RELOC *) (rel_addr + tpnt->loadaddr);
+ rel_size = rel_size / sizeof(ELF_RELOC);
+//_dl_fdprintf(2,"rpnt=%08x\n",rpnt);
+
+ symtab = (Elf32_Sym *) (tpnt->dynamic_info[DT_SYMTAB] + tpnt->loadaddr);
+ strtab = (char *) (tpnt->dynamic_info[DT_STRTAB] + tpnt->loadaddr);
+//_dl_fdprintf(2,"symtab=%08x\n",symtab);
+//_dl_fdprintf(2,"strtab=%08x\n",strtab);
+ plt = (unsigned long *)(tpnt->dynamic_info[DT_PLTGOT] + tpnt->loadaddr);
+
+ for (i = 0; i < rel_size; i++, rpnt++) {
+ reloc_addr = (unsigned long *) (tpnt->loadaddr + (unsigned long) rpnt->r_offset);
+//_dl_fdprintf(2,"reloc_addr=%08x\n",reloc_addr);
+ reloc_type = ELF32_R_TYPE(rpnt->r_info);
+//_dl_fdprintf(2,"reloc_type=%08x\n",reloc_type);
+ symtab_index = ELF32_R_SYM(rpnt->r_info);
+//_dl_fdprintf(2,"symtab_index=%08x\n",symtab_index);
+ addend = rpnt->r_addend;
+//_dl_fdprintf(2,"addend=%08x\n",rpnt->r_addend);
+ symbol_addr = 0;
+
+ if (!symtab_index && tpnt->libtype == program_interpreter)
+ continue;
+
+ if (symtab_index) {
+
+ if (tpnt->libtype == program_interpreter &&
+ _dl_symbol(strtab + symtab[symtab_index].st_name))
+ continue;
+
+ symbol_addr = (unsigned long) _dl_find_hash(strtab + symtab[symtab_index].st_name,
+ tpnt->symbol_scope, (unsigned long) reloc_addr,
+ (reloc_type == R_PPC_JMP_SLOT ? tpnt : NULL), 0);
+
+ /*
+ * We want to allow undefined references to weak symbols - this might
+ * have been intentional. We should not be linking local symbols
+ * here, so all bases should be covered.
+ */
+ if (!symbol_addr &&
+ ELF32_ST_BIND(symtab[symtab_index].st_info) == STB_GLOBAL) {
+ _dl_fdprintf(2, "%s: can't resolve symbol '%s'\n",
+ _dl_progname, strtab + symtab[symtab_index].st_name);
+ goof++;
+ }
+ }
+ switch (reloc_type) {
+ case R_PPC_NONE:
+ break;
+ case R_PPC_REL24:
+ {
+ int delta = symbol_addr - (unsigned long)reloc_addr;
+ *reloc_addr &= 0xfc000003;
+ *reloc_addr |= delta&0x03fffffc;
+ }
+ break;
+ case R_PPC_RELATIVE:
+ *reloc_addr += (unsigned long)tpnt->loadaddr + addend;
+ break;
+ case R_PPC_ADDR32:
+ *reloc_addr += symbol_addr;
+ break;
+ case R_PPC_JMP_SLOT:
+ {
+ unsigned long targ_addr = (unsigned long)_dl_linux_resolve;
+ int delta = targ_addr - (unsigned long)reloc_addr;
+ if(delta<<6>>6 == delta){
+ *reloc_addr = OPCODE_B(delta);
+ }else if (targ_addr <= 0x01fffffc || targ_addr >= 0xfe000000){
+ *reloc_addr = OPCODE_BA (targ_addr);
+ }else{
+ {
+ int delta;
+ int index;
+
+ delta = (unsigned long)(plt+PLT_TRAMPOLINE_ENTRY_WORDS+2)
+ - (unsigned long)(reloc_addr+1);
+
+ index = ((unsigned long)reloc_addr -
+ (unsigned long)(plt+PLT_INITIAL_ENTRY_WORDS))
+ /sizeof(unsigned long);
+ index /= 2;
+//_dl_fdprintf(2, "index %08x\n",index);
+//_dl_fdprintf(2, "delta %08x\n",delta);
+ reloc_addr[0] = OPCODE_LI(11,index*4);
+ reloc_addr[1] = OPCODE_B(delta);
+#if 0
+_dl_fdprintf(2, "resolving ");
+ if (symtab_index)
+ _dl_fdprintf(2, "'%s'\n", strtab + symtab[symtab_index].st_name);
+_dl_fdprintf(2, "type %d reloc_addr %08x addr %08x addend %08x old %08x\n",
+ reloc_type, reloc_addr, symbol_addr, addend, *reloc_addr);
+ _dl_fdprintf(2, "need to create PLT\n");
+#endif
+ }
+ }
+ break;
+ }
+ default:
+ _dl_fdprintf(2, "%s: can't handle reloc type ", _dl_progname);
+#ifdef VERBOSE_DLINKER
+ _dl_fdprintf(2, "%s ", _dl_reltypes[reloc_type]);
+#endif
+ if (symtab_index)
+ _dl_fdprintf(2, "'%s'\n", strtab + symtab[symtab_index].st_name);
+ _dl_exit(1);
+ };
+
+ /* instructions were modified */
+ PPC_DCBST(reloc_addr);
+ PPC_SYNC;
+ PPC_ICBI(reloc_addr);
+
+//_dl_fdprintf(2,"reloc_addr %08x: %08x\n",reloc_addr,*reloc_addr);
+ };
+ return goof;
+}
+
+
+/* This is done as a separate step, because there are cases where
+ information is first copied and later initialized. This results in
+ the wrong information being copied. Someone at Sun was complaining about
+ a bug in the handling of _COPY by SVr4, and this may in fact be what he
+ was talking about. Sigh. */
+
+/* No, there are cases where the SVr4 linker fails to emit COPY relocs
+ at all */
+
+int _dl_parse_copy_information(struct dyn_elf *xpnt, unsigned long rel_addr,
+ unsigned long rel_size, int type)
+{
+ int i;
+ char *strtab;
+ int reloc_type;
+ int goof = 0;
+ Elf32_Sym *symtab;
+ ELF_RELOC *rpnt;
+ unsigned long *reloc_addr;
+ unsigned long symbol_addr;
+ struct elf_resolve *tpnt;
+ int symtab_index;
+
+_dl_fdprintf(2,"parse_copy xpnt=%08x rel_addr=%08x rel_size=%08x type=%d\n",
+ (int)xpnt,rel_addr,rel_size,type);
+
+ /* Now parse the relocation information */
+
+ tpnt = xpnt->dyn;
+
+ rpnt = (ELF_RELOC *) (rel_addr + tpnt->loadaddr);
+ rel_size = rel_size / sizeof(ELF_RELOC);
+
+ symtab = (Elf32_Sym *) (tpnt->dynamic_info[DT_SYMTAB] + tpnt->loadaddr);
+ strtab = (char *) (tpnt->dynamic_info[DT_STRTAB] + tpnt->loadaddr);
+
+ for (i = 0; i < rel_size; i++, rpnt++) {
+ reloc_addr = (unsigned long *) (tpnt->loadaddr + (unsigned long) rpnt->r_offset);
+ reloc_type = ELF32_R_TYPE(rpnt->r_info);
+		if (reloc_type != R_PPC_COPY)
+ continue;
+ symtab_index = ELF32_R_SYM(rpnt->r_info);
+ symbol_addr = 0;
+ if (!symtab_index && tpnt->libtype == program_interpreter)
+ continue;
+ if (symtab_index) {
+
+ if (tpnt->libtype == program_interpreter &&
+ _dl_symbol(strtab + symtab[symtab_index].st_name))
+ continue;
+
+ symbol_addr = (unsigned long) _dl_find_hash(strtab +
+ symtab[symtab_index].st_name, xpnt->next,
+ (unsigned long) reloc_addr, NULL, 1);
+ if (!symbol_addr) {
+ _dl_fdprintf(2, "%s: can't resolve symbol '%s'\n",
+ _dl_progname, strtab + symtab[symtab_index].st_name);
+ goof++;
+ };
+ };
+ if (!goof) {
+ _dl_memcpy((char *) symtab[symtab_index].st_value,
+ (char *) symbol_addr, symtab[symtab_index].st_size);
+ }
+ };
+ return goof;
+}
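Several cases above (R_PPC_REL24 here, and the matching branch in PERFORM_BOOTSTRAP_RELOC) patch a relative branch in place: the opcode and the AA/LK bits are preserved and a new PC-relative, word-aligned displacement is spliced in. A minimal sketch of that pattern, with a hypothetical helper name:

/* Rewrite the branch at reloc_addr so it targets symbol_addr, mirroring the
 * R_PPC_REL24 case above.  Mask 0xfc000003 keeps the opcode/AA/LK bits,
 * mask 0x03fffffc carries the signed 24-bit word displacement. */
static void patch_rel24(unsigned long *reloc_addr, unsigned long symbol_addr)
{
	long delta = (long) symbol_addr - (long) reloc_addr;

	*reloc_addr = (*reloc_addr & 0xfc000003) | (delta & 0x03fffffc);
	/* As in the code above, the caller must still flush the change with
	 * PPC_DCBST/PPC_SYNC/PPC_ICBI so the CPU refetches the new opcode. */
}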
diff --git a/ldso/ldso/powerpc/ld_syscalls.h b/ldso/ldso/powerpc/ld_syscalls.h
new file mode 100644
index 000000000..ae37aa822
--- /dev/null
+++ b/ldso/ldso/powerpc/ld_syscalls.h
@@ -0,0 +1,243 @@
+#include <sys/types.h>
+
+/*
+ * This file contains the system call macros and syscall
+ * numbers used by the shared library loader.
+ */
+
+#define __NR_exit 1
+#define __NR_read 3
+#define __NR_write 4
+#define __NR_open 5
+#define __NR_close 6
+#define __NR_getuid 24
+#define __NR_geteuid 49
+#define __NR_getgid 47
+#define __NR_getegid 50
+#define __NR_mmap 90
+#define __NR_munmap 91
+#define __NR_stat 106
+#define __NR_mprotect 125
+
+/* Here are the macros which define how this platform makes
+ * system calls. This particular variant does _not_ set
+ * errno (note how it is disabled in __syscall_return) since
+ * these will get called before the errno symbol is dynamically
+ * linked. */
+
+#undef __syscall_return
+#define __syscall_return(type) \
+ return (__sc_err & 0x10000000 ? /*errno = __sc_ret,*/ __sc_ret = -1 : 0), \
+ (type) __sc_ret
+
+#undef __syscall_clobbers
+#define __syscall_clobbers \
+ "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12"
+
+#undef _syscall0
+#define _syscall0(type,name) \
+type name(void) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+#undef _syscall1
+#define _syscall1(type,name,type1,arg1) \
+type name(type1 arg1) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ \
+ __sc_3 = (unsigned long) (arg1); \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+#undef _syscall2
+#define _syscall2(type,name,type1,arg1,type2,arg2) \
+type name(type1 arg1, type2 arg2) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ register unsigned long __sc_4 __asm__ ("r4"); \
+ \
+ __sc_3 = (unsigned long) (arg1); \
+ __sc_4 = (unsigned long) (arg2); \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0), \
+ "r" (__sc_4) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+#undef _syscall3
+#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
+type name(type1 arg1, type2 arg2, type3 arg3) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ register unsigned long __sc_4 __asm__ ("r4"); \
+ register unsigned long __sc_5 __asm__ ("r5"); \
+ \
+ __sc_3 = (unsigned long) (arg1); \
+ __sc_4 = (unsigned long) (arg2); \
+ __sc_5 = (unsigned long) (arg3); \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0), \
+ "r" (__sc_4), \
+ "r" (__sc_5) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+#undef _syscall4
+#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
+type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ register unsigned long __sc_4 __asm__ ("r4"); \
+ register unsigned long __sc_5 __asm__ ("r5"); \
+ register unsigned long __sc_6 __asm__ ("r6"); \
+ \
+ __sc_3 = (unsigned long) (arg1); \
+ __sc_4 = (unsigned long) (arg2); \
+ __sc_5 = (unsigned long) (arg3); \
+ __sc_6 = (unsigned long) (arg4); \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0), \
+ "r" (__sc_4), \
+ "r" (__sc_5), \
+ "r" (__sc_6) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+#undef _syscall5
+#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
+type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ register unsigned long __sc_4 __asm__ ("r4"); \
+ register unsigned long __sc_5 __asm__ ("r5"); \
+ register unsigned long __sc_6 __asm__ ("r6"); \
+ register unsigned long __sc_7 __asm__ ("r7"); \
+ \
+ __sc_3 = (unsigned long) (arg1); \
+ __sc_4 = (unsigned long) (arg2); \
+ __sc_5 = (unsigned long) (arg3); \
+ __sc_6 = (unsigned long) (arg4); \
+ __sc_7 = (unsigned long) (arg5); \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0), \
+ "r" (__sc_4), \
+ "r" (__sc_5), \
+ "r" (__sc_6), \
+ "r" (__sc_7) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+
+#undef _syscall6
+#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
+type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ register unsigned long __sc_4 __asm__ ("r4"); \
+ register unsigned long __sc_5 __asm__ ("r5"); \
+ register unsigned long __sc_6 __asm__ ("r6"); \
+ register unsigned long __sc_7 __asm__ ("r7"); \
+ register unsigned long __sc_8 __asm__ ("r8"); \
+ \
+ __sc_3 = (unsigned long) (arg1); \
+ __sc_4 = (unsigned long) (arg2); \
+ __sc_5 = (unsigned long) (arg3); \
+ __sc_6 = (unsigned long) (arg4); \
+ __sc_7 = (unsigned long) (arg5); \
+ __sc_8 = (unsigned long) (arg6); \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0), \
+ "r" (__sc_4), \
+ "r" (__sc_5), \
+ "r" (__sc_6), \
+ "r" (__sc_7), \
+ "r" (__sc_8) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+
diff --git a/ldso/ldso/powerpc/ld_sysdep.h b/ldso/ldso/powerpc/ld_sysdep.h
new file mode 100644
index 000000000..16b853ce3
--- /dev/null
+++ b/ldso/ldso/powerpc/ld_sysdep.h
@@ -0,0 +1,136 @@
+/*
+ * Various assembly language/system dependent hacks that are required
+ * so that we can minimize the amount of platform specific code.
+ */
+
+/*
+ * Define this if the system uses RELOCA.
+ */
+#define ELF_USES_RELOCA
+
+/*
+ * Get a pointer to the argv array. On many platforms this can be just
+ * the address of the first argument, on other platforms we need to
+ * do something a little more subtle here.
+ */
+#define GET_ARGV(ARGVP, ARGS) ARGVP = ((unsigned long*) ARGS)
+
+/*
+ * Initialization sequence for a GOT.
+ */
+#define INIT_GOT(GOT_BASE,MODULE) _dl_init_got(GOT_BASE,MODULE)
+
+/* Stuff for the PLT. */
+#define PLT_INITIAL_ENTRY_WORDS 18
+#define PLT_LONGBRANCH_ENTRY_WORDS 0
+#define PLT_TRAMPOLINE_ENTRY_WORDS 6
+#define PLT_DOUBLE_SIZE (1<<13)
+#define PLT_ENTRY_START_WORDS(entry_number) \
+ (PLT_INITIAL_ENTRY_WORDS + (entry_number)*2 \
+ + ((entry_number) > PLT_DOUBLE_SIZE \
+ ? ((entry_number) - PLT_DOUBLE_SIZE)*2 \
+ : 0))
+#define PLT_DATA_START_WORDS(num_entries) PLT_ENTRY_START_WORDS(num_entries)
+
+/* Macros to build PowerPC opcode words. */
+#define OPCODE_ADDI(rd,ra,simm) \
+ (0x38000000 | (rd) << 21 | (ra) << 16 | ((simm) & 0xffff))
+#define OPCODE_ADDIS(rd,ra,simm) \
+ (0x3c000000 | (rd) << 21 | (ra) << 16 | ((simm) & 0xffff))
+#define OPCODE_ADD(rd,ra,rb) \
+ (0x7c000214 | (rd) << 21 | (ra) << 16 | (rb) << 11)
+#define OPCODE_B(target) (0x48000000 | ((target) & 0x03fffffc))
+#define OPCODE_BA(target) (0x48000002 | ((target) & 0x03fffffc))
+#define OPCODE_BCTR() 0x4e800420
+#define OPCODE_LWZ(rd,d,ra) \
+ (0x80000000 | (rd) << 21 | (ra) << 16 | ((d) & 0xffff))
+#define OPCODE_LWZU(rd,d,ra) \
+ (0x84000000 | (rd) << 21 | (ra) << 16 | ((d) & 0xffff))
+#define OPCODE_MTCTR(rd) (0x7C0903A6 | (rd) << 21)
+#define OPCODE_RLWINM(ra,rs,sh,mb,me) \
+ (0x54000000 | (rs) << 21 | (ra) << 16 | (sh) << 11 | (mb) << 6 | (me) << 1)
+
+#define OPCODE_LI(rd,simm) OPCODE_ADDI(rd,0,simm)
+#define OPCODE_ADDIS_HI(rd,ra,value) \
+ OPCODE_ADDIS(rd,ra,((value) + 0x8000) >> 16)
+#define OPCODE_LIS_HI(rd,value) OPCODE_ADDIS_HI(rd,0,value)
+#define OPCODE_SLWI(ra,rs,sh) OPCODE_RLWINM(ra,rs,sh,0,31-sh)
+
+
+#define PPC_DCBST(where) asm volatile ("dcbst 0,%0" : : "r"(where) : "memory")
+#define PPC_SYNC asm volatile ("sync" : : : "memory")
+#define PPC_ISYNC asm volatile ("sync; isync" : : : "memory")
+#define PPC_ICBI(where) asm volatile ("icbi 0,%0" : : "r"(where) : "memory")
+#define PPC_DIE asm volatile ("tweq 0,0")
+
+/*
+ * Here is a macro to perform a relocation. This is only used when
+ * bootstrapping the dynamic loader. RELP is the relocation that we
+ * are performing, REL is the pointer to the address we are relocating.
+ * SYMBOL is the symbol involved in the relocation, and LOAD is the
+ * load address.
+ */
+// finaladdr = LOAD ?
+#define PERFORM_BOOTSTRAP_RELOC(RELP,REL,SYMBOL,LOAD) \
+ {int type=ELF32_R_TYPE((RELP)->r_info); \
+ if(type==R_PPC_NONE){ \
+ }else if(type==R_PPC_ADDR32){ \
+ *REL += (SYMBOL); \
+ }else if(type==R_PPC_RELATIVE){ \
+ *REL = (Elf32_Word)(LOAD) + (RELP)->r_addend; \
+ }else if(type==R_PPC_REL24){ \
+ Elf32_Sword delta = (Elf32_Word)(SYMBOL) - (Elf32_Word)(REL); \
+ *REL &= 0xfc000003; \
+ *REL |= (delta & 0x03fffffc); \
+ }else if(type==R_PPC_JMP_SLOT){ \
+ Elf32_Sword delta = (Elf32_Word)(SYMBOL) - (Elf32_Word)(REL); \
+ /*if (delta << 6 >> 6 != delta)_dl_exit(99);*/ \
+ *REL = OPCODE_B(delta); \
+ }else{ \
+ _dl_exit(100+ELF32_R_TYPE((RELP)->r_info)); \
+ } \
+/*hexprint(*REL);*/ \
+ PPC_DCBST(REL); PPC_SYNC; PPC_ICBI(REL); \
+ }
+
+#if 0
+ case R_386_32: \
+ *REL += SYMBOL; \
+ break; \
+ case R_386_PC32: \
+ *REL += SYMBOL - (unsigned long) REL; \
+ break; \
+ case R_386_GLOB_DAT: \
+ case R_386_JMP_SLOT: \
+ *REL = SYMBOL; \
+ break; \
+ case R_386_RELATIVE: \
+ *REL += (unsigned long) LOAD; \
+ break;
+#endif
+
+/*
+ * Transfer control to the user's application, once the dynamic loader
+ * is done. This routine has to exit the current function, then
+ * call the _dl_elf_main function.
+ */
+#define START() \
+ __asm__ volatile ("mtlr %0\n\t" \
+ "blrl\n\t" \
+ : "=r" (status) : "r" (_dl_elf_main))
+
+
+
+/* Here we define the magic numbers that this dynamic loader should accept */
+
+#define MAGIC1 EM_PPC
+#undef MAGIC2
+/* Used for error messages */
+#define ELF_TARGET "powerpc"
+
+struct elf_resolve;
+extern unsigned long _dl_linux_resolver(struct elf_resolve * tpnt, int reloc_entry);
+void _dl_init_got(unsigned long *lpnt,struct elf_resolve *tpnt);
+
+
+#define do_rem(result, n, base) result = (n % base)
diff --git a/ldso/ldso/powerpc/resolve.S b/ldso/ldso/powerpc/resolve.S
new file mode 100644
index 000000000..9193636fc
--- /dev/null
+++ b/ldso/ldso/powerpc/resolve.S
@@ -0,0 +1,82 @@
+/*
+ * Stolen from glibc-2.2.2 by David Schleef <ds@schleef.org>
+ */
+
+.text
+.align 4
+
+.globl _dl_linux_resolver
+
+.globl _dl_linux_resolve
+.type _dl_linux_resolve,@function
+
+_dl_linux_resolve:
+// We need to save the registers used to pass parameters, and register 0,
+// which is used by _mcount; the registers are saved in a stack frame.
+ stwu 1,-64(1)
+ stw 0,12(1)
+ stw 3,16(1)
+ stw 4,20(1)
+// The code that calls this has put parameters for 'fixup' in r12 and r11.
+ mr 3,12
+ stw 5,24(1)
+ mr 4,11
+ stw 6,28(1)
+ mflr 0
+// We also need to save some of the condition register fields.
+ stw 7,32(1)
+ stw 0,48(1)
+ stw 8,36(1)
+ mfcr 0
+ stw 9,40(1)
+ stw 10,44(1)
+ stw 0,8(1)
+ bl _dl_linux_resolver@local
+// 'fixup' returns the address we want to branch to.
+ mtctr 3
+// Put the registers back...
+ lwz 0,48(1)
+ lwz 10,44(1)
+ lwz 9,40(1)
+ mtlr 0
+ lwz 8,36(1)
+ lwz 0,8(1)
+ lwz 7,32(1)
+ lwz 6,28(1)
+ mtcrf 0xFF,0
+ lwz 5,24(1)
+ lwz 4,20(1)
+ lwz 3,16(1)
+ lwz 0,12(1)
+// ...unwind the stack frame, and jump to the PLT entry we updated.
+ addi 1,1,64
+ bctr
+
+.LFE2:
+ .size _dl_linux_resolve,.LFE2-_dl_linux_resolve
+
+#if 0
+
+ pusha /* preserve all regs */
+ lea 0x20(%esp),%eax /* eax = tpnt and reloc_entry params */
+ pushl 4(%eax) /* push copy of reloc_entry param */
+ pushl (%eax) /* push copy of tpnt param */
+
+#ifdef __PIC__
+ call .L24
+.L24:
+ popl %ebx
+ addl $_GLOBAL_OFFSET_TABLE_+[.-.L24],%ebx
+ movl _dl_linux_resolver@GOT(%ebx),%ebx /* eax = resolved func */
+ call *%ebx
+#else
+ call _dl_linux_resolver
+#endif
+ movl %eax,0x28(%esp) /* store func addr over original
+ * tpnt param */
+ addl $0x8,%esp /* remove copy parameters */
+ popa /* restore regs */
+ ret $4 /* jump to func removing original
+ * reloc_entry param from stack */
+#endif
+
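In C terms, the stub above receives the link-map pointer in r12 and the relocation offset in r11 (placed there by the PLT code built in elfinterp.c), hands them to _dl_linux_resolver(), and then jumps to the address it returns, with the caller's argument registers restored. A rough, non-authoritative model of that control flow (register saving omitted, function name hypothetical):

struct elf_resolve;
extern unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry);

/* Model only - the real implementation is the asm above, which must not
 * disturb r0, r3-r10, CR or LR because the interrupted call's arguments
 * are still live in those registers. */
static void model_dl_linux_resolve(struct elf_resolve *tpnt /* r12 */,
				   int reloc_entry /* r11 */)
{
	void (*target)(void) =
		(void (*)(void)) _dl_linux_resolver(tpnt, reloc_entry);	/* mr 3,12; mr 4,11; bl */

	target();	/* mtctr 3; bctr - tail-jump to the resolved symbol */
}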
diff --git a/ldso/ldso/powerpc/syscalls.h b/ldso/ldso/powerpc/syscalls.h
new file mode 100644
index 000000000..ae37aa822
--- /dev/null
+++ b/ldso/ldso/powerpc/syscalls.h
@@ -0,0 +1,243 @@
+#include <sys/types.h>
+
+/*
+ * This file contains the system call macros and syscall
+ * numbers used by the shared library loader.
+ */
+
+#define __NR_exit 1
+#define __NR_read 3
+#define __NR_write 4
+#define __NR_open 5
+#define __NR_close 6
+#define __NR_getuid 24
+#define __NR_geteuid 49
+#define __NR_getgid 47
+#define __NR_getegid 50
+#define __NR_mmap 90
+#define __NR_munmap 91
+#define __NR_stat 106
+#define __NR_mprotect 125
+
+/* Here are the macros which define how this platform makes
+ * system calls. This particular variant does _not_ set
+ * errno (note how it is disabled in __syscall_return) since
+ * these will get called before the errno symbol is dynamically
+ * linked. */
+
+#undef __syscall_return
+#define __syscall_return(type) \
+ return (__sc_err & 0x10000000 ? /*errno = __sc_ret,*/ __sc_ret = -1 : 0), \
+ (type) __sc_ret
+
+#undef __syscall_clobbers
+#define __syscall_clobbers \
+ "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12"
+
+#undef _syscall0
+#define _syscall0(type,name) \
+type name(void) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+#undef _syscall1
+#define _syscall1(type,name,type1,arg1) \
+type name(type1 arg1) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ \
+ __sc_3 = (unsigned long) (arg1); \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+#undef _syscall2
+#define _syscall2(type,name,type1,arg1,type2,arg2) \
+type name(type1 arg1, type2 arg2) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ register unsigned long __sc_4 __asm__ ("r4"); \
+ \
+ __sc_3 = (unsigned long) (arg1); \
+ __sc_4 = (unsigned long) (arg2); \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0), \
+ "r" (__sc_4) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+#undef _syscall3
+#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
+type name(type1 arg1, type2 arg2, type3 arg3) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ register unsigned long __sc_4 __asm__ ("r4"); \
+ register unsigned long __sc_5 __asm__ ("r5"); \
+ \
+ __sc_3 = (unsigned long) (arg1); \
+ __sc_4 = (unsigned long) (arg2); \
+ __sc_5 = (unsigned long) (arg3); \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0), \
+ "r" (__sc_4), \
+ "r" (__sc_5) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+#undef _syscall4
+#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
+type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ register unsigned long __sc_4 __asm__ ("r4"); \
+ register unsigned long __sc_5 __asm__ ("r5"); \
+ register unsigned long __sc_6 __asm__ ("r6"); \
+ \
+ __sc_3 = (unsigned long) (arg1); \
+ __sc_4 = (unsigned long) (arg2); \
+ __sc_5 = (unsigned long) (arg3); \
+ __sc_6 = (unsigned long) (arg4); \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0), \
+ "r" (__sc_4), \
+ "r" (__sc_5), \
+ "r" (__sc_6) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+#undef _syscall5
+#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
+type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ register unsigned long __sc_4 __asm__ ("r4"); \
+ register unsigned long __sc_5 __asm__ ("r5"); \
+ register unsigned long __sc_6 __asm__ ("r6"); \
+ register unsigned long __sc_7 __asm__ ("r7"); \
+ \
+ __sc_3 = (unsigned long) (arg1); \
+ __sc_4 = (unsigned long) (arg2); \
+ __sc_5 = (unsigned long) (arg3); \
+ __sc_6 = (unsigned long) (arg4); \
+ __sc_7 = (unsigned long) (arg5); \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0), \
+ "r" (__sc_4), \
+ "r" (__sc_5), \
+ "r" (__sc_6), \
+ "r" (__sc_7) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+
+#undef _syscall6
+#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
+type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
+{ \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ register unsigned long __sc_4 __asm__ ("r4"); \
+ register unsigned long __sc_5 __asm__ ("r5"); \
+ register unsigned long __sc_6 __asm__ ("r6"); \
+ register unsigned long __sc_7 __asm__ ("r7"); \
+ register unsigned long __sc_8 __asm__ ("r8"); \
+ \
+ __sc_3 = (unsigned long) (arg1); \
+ __sc_4 = (unsigned long) (arg2); \
+ __sc_5 = (unsigned long) (arg3); \
+ __sc_6 = (unsigned long) (arg4); \
+ __sc_7 = (unsigned long) (arg5); \
+ __sc_8 = (unsigned long) (arg6); \
+ __sc_0 = __NR_##name; \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %1 " \
+ : "=&r" (__sc_3), "=&r" (__sc_0) \
+ : "0" (__sc_3), "1" (__sc_0), \
+ "r" (__sc_4), \
+ "r" (__sc_5), \
+ "r" (__sc_6), \
+ "r" (__sc_7), \
+ "r" (__sc_8) \
+ : __syscall_clobbers); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ __syscall_return (type); \
+}
+
+
diff --git a/ldso/ldso/powerpc/sysdep.h b/ldso/ldso/powerpc/sysdep.h
new file mode 100644
index 000000000..16b853ce3
--- /dev/null
+++ b/ldso/ldso/powerpc/sysdep.h
@@ -0,0 +1,136 @@
+/*
+ * Various assembly language/system dependent hacks that are required
+ * so that we can minimize the amount of platform specific code.
+ */
+
+/*
+ * Define this if the system uses RELOCA.
+ */
+#define ELF_USES_RELOCA
+
+/*
+ * Get a pointer to the argv array. On many platforms this can be just
+ * the address of the first argument, on other platforms we need to
+ * do something a little more subtle here.
+ */
+#define GET_ARGV(ARGVP, ARGS) ARGVP = ((unsigned long*) ARGS)
+
+/*
+ * Initialization sequence for a GOT.
+ */
+#define INIT_GOT(GOT_BASE,MODULE) _dl_init_got(GOT_BASE,MODULE)
+
+/* Stuff for the PLT. */
+#define PLT_INITIAL_ENTRY_WORDS 18
+#define PLT_LONGBRANCH_ENTRY_WORDS 0
+#define PLT_TRAMPOLINE_ENTRY_WORDS 6
+#define PLT_DOUBLE_SIZE (1<<13)
+#define PLT_ENTRY_START_WORDS(entry_number) \
+ (PLT_INITIAL_ENTRY_WORDS + (entry_number)*2 \
+ + ((entry_number) > PLT_DOUBLE_SIZE \
+ ? ((entry_number) - PLT_DOUBLE_SIZE)*2 \
+ : 0))
+#define PLT_DATA_START_WORDS(num_entries) PLT_ENTRY_START_WORDS(num_entries)
+
+/* Macros to build PowerPC opcode words. */
+#define OPCODE_ADDI(rd,ra,simm) \
+ (0x38000000 | (rd) << 21 | (ra) << 16 | ((simm) & 0xffff))
+#define OPCODE_ADDIS(rd,ra,simm) \
+ (0x3c000000 | (rd) << 21 | (ra) << 16 | ((simm) & 0xffff))
+#define OPCODE_ADD(rd,ra,rb) \
+ (0x7c000214 | (rd) << 21 | (ra) << 16 | (rb) << 11)
+#define OPCODE_B(target) (0x48000000 | ((target) & 0x03fffffc))
+#define OPCODE_BA(target) (0x48000002 | ((target) & 0x03fffffc))
+#define OPCODE_BCTR() 0x4e800420
+#define OPCODE_LWZ(rd,d,ra) \
+ (0x80000000 | (rd) << 21 | (ra) << 16 | ((d) & 0xffff))
+#define OPCODE_LWZU(rd,d,ra) \
+ (0x84000000 | (rd) << 21 | (ra) << 16 | ((d) & 0xffff))
+#define OPCODE_MTCTR(rd) (0x7C0903A6 | (rd) << 21)
+#define OPCODE_RLWINM(ra,rs,sh,mb,me) \
+ (0x54000000 | (rs) << 21 | (ra) << 16 | (sh) << 11 | (mb) << 6 | (me) << 1)
+
+#define OPCODE_LI(rd,simm) OPCODE_ADDI(rd,0,simm)
+#define OPCODE_ADDIS_HI(rd,ra,value) \
+ OPCODE_ADDIS(rd,ra,((value) + 0x8000) >> 16)
+#define OPCODE_LIS_HI(rd,value) OPCODE_ADDIS_HI(rd,0,value)
+#define OPCODE_SLWI(ra,rs,sh) OPCODE_RLWINM(ra,rs,sh,0,31-sh)
+
+
+#define PPC_DCBST(where) asm volatile ("dcbst 0,%0" : : "r"(where) : "memory")
+#define PPC_SYNC asm volatile ("sync" : : : "memory")
+#define PPC_ISYNC asm volatile ("sync; isync" : : : "memory")
+#define PPC_ICBI(where) asm volatile ("icbi 0,%0" : : "r"(where) : "memory")
+#define PPC_DIE asm volatile ("tweq 0,0")
+
+/*
+ * Here is a macro to perform a relocation. This is only used when
+ * bootstrapping the dynamic loader. RELP is the relocation that we
+ * are performing, REL is the pointer to the address we are relocating.
+ * SYMBOL is the symbol involved in the relocation, and LOAD is the
+ * load address.
+ */
+// finaladdr = LOAD ?
+#define PERFORM_BOOTSTRAP_RELOC(RELP,REL,SYMBOL,LOAD) \
+ {int type=ELF32_R_TYPE((RELP)->r_info); \
+ if(type==R_PPC_NONE){ \
+ }else if(type==R_PPC_ADDR32){ \
+ *REL += (SYMBOL); \
+ }else if(type==R_PPC_RELATIVE){ \
+ *REL = (Elf32_Word)(LOAD) + (RELP)->r_addend; \
+ }else if(type==R_PPC_REL24){ \
+ Elf32_Sword delta = (Elf32_Word)(SYMBOL) - (Elf32_Word)(REL); \
+ *REL &= 0xfc000003; \
+ *REL |= (delta & 0x03fffffc); \
+ }else if(type==R_PPC_JMP_SLOT){ \
+ Elf32_Sword delta = (Elf32_Word)(SYMBOL) - (Elf32_Word)(REL); \
+ /*if (delta << 6 >> 6 != delta)_dl_exit(99);*/ \
+ *REL = OPCODE_B(delta); \
+ }else{ \
+ _dl_exit(100+ELF32_R_TYPE((RELP)->r_info)); \
+ } \
+/*hexprint(*REL);*/ \
+ PPC_DCBST(REL); PPC_SYNC; PPC_ICBI(REL); \
+ }
+
+#if 0
+ case R_386_32: \
+ *REL += SYMBOL; \
+ break; \
+ case R_386_PC32: \
+ *REL += SYMBOL - (unsigned long) REL; \
+ break; \
+ case R_386_GLOB_DAT: \
+ case R_386_JMP_SLOT: \
+ *REL = SYMBOL; \
+ break; \
+ case R_386_RELATIVE: \
+ *REL += (unsigned long) LOAD; \
+ break;
+#endif
+
+/*
+ * Transfer control to the user's application, once the dynamic loader
+ * is done. This routine has to exit the current function, then
+ * call the _dl_elf_main function.
+ */
+#define START() \
+ __asm__ volatile ("mtlr %0\n\t" \
+ "blrl\n\t" \
+ : "=r" (status) : "r" (_dl_elf_main))
+
+
+
+/* Here we define the magic numbers that this dynamic loader should accept */
+
+#define MAGIC1 EM_PPC
+#undef MAGIC2
+/* Used for error messages */
+#define ELF_TARGET "powerpc"
+
+struct elf_resolve;
+extern unsigned long _dl_linux_resolver(struct elf_resolve * tpnt, int reloc_entry);
+void _dl_init_got(unsigned long *lpnt,struct elf_resolve *tpnt);
+
+
+#define do_rem(result, n, base) result = (n % base)