summaryrefslogtreecommitdiff
path: root/ldso
diff options
context:
space:
mode:
authorMike Frysinger <vapier@gentoo.org>2006-11-17 06:51:34 +0000
committerMike Frysinger <vapier@gentoo.org>2006-11-17 06:51:34 +0000
commita86d1a42c8ddc7cc36f28921e2c0201b08138943 (patch)
tree5ea208bd46b020940b62f0c391b53ac755879e50 /ldso
parenta9f3bfe7dd2f2b3f690ff88d9ce83858f9b85b6d (diff)
Bernd Schmidt writes: [blackfin updates] add support for FDPIC and include L1 functions
Diffstat (limited to 'ldso')
-rw-r--r--ldso/ldso/bfin/dl-debug.h54
-rw-r--r--ldso/ldso/bfin/dl-inlines.h543
-rw-r--r--ldso/ldso/bfin/dl-startup.h155
-rw-r--r--ldso/ldso/bfin/dl-syscalls.h208
-rw-r--r--ldso/ldso/bfin/dl-sysdep.h216
-rw-r--r--ldso/ldso/bfin/elfinterp.c352
-rw-r--r--ldso/ldso/bfin/resolve.S77
7 files changed, 1605 insertions, 0 deletions
diff --git a/ldso/ldso/bfin/dl-debug.h b/ldso/ldso/bfin/dl-debug.h
new file mode 100644
index 000000000..9dd316240
--- /dev/null
+++ b/ldso/ldso/bfin/dl-debug.h
@@ -0,0 +1,54 @@
+/* vi: set sw=4 ts=4: */
+/* Blackfin ELF shared library loader support
+ *
+ * Copyright (c) 1994-2000 Eric Youngdale, Peter MacDonald,
+ * David Engel, Hongjiu Lu and Mitch D'Souza
+ * Copyright (C) 2001-2004 Erik Andersen
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. The name of the above contributors may not be
+ * used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* Printable names of the Blackfin relocation types, indexed by
+ * relocation number (uses the GNU C "[index] value" designated
+ * initializer extension); consumed by the loader's debug output.  */
+static const char *_dl_reltypes_tab[] =
+{
+ [0] "R_BFIN_unused0", "R_BFIN_pcrel5m2",
+ [2] "R_BFIN_unused1", "R_BFIN_pcrel10",
+ [4] "R_BFIN_pcrel12_jump", "R_BFIN_rimm16",
+ [6] "R_BFIN_luimm16", "R_BFIN_huimm16",
+ [8] "R_BFIN_pcrel12_jump_s","R_BFIN_pcrel24_jump_x",
+ [10] "R_BFIN_pcrel24", "R_BFIN_unusedb",
+ [12] "R_BFIN_unusedc", "R_BFIN_pcrel24_jump_l",
+ [14] "R_BFIN_pcrel24_call_x","R_BFIN_var_eq_symb",
+ [16] "R_BFIN_byte_data", "R_BFIN_byte2_data", "R_BFIN_byte4_data",
+ [19] "R_BFIN_pcrel11",
+
+ [20] "R_BFIN_GOT17M4", "R_BFIN_GOTHI", "R_BFIN_GOTLO",
+ [23] "R_BFIN_FUNCDESC",
+ [24] "R_BFIN_FUNCDESC_GOT17M4", "R_BFIN_FUNCDESC_GOTHI", "R_BFIN_FUNCDESC_GOTLO",
+ [27] "R_BFIN_FUNCDESC_VALUE", "R_BFIN_FUNCDESC_GOTOFF17M4",
+ [29] "R_BFIN_FUNCDESC_GOTOFFHI", "R_BFIN_FUNCDESC_GOTOFFLO",
+ [31] "R_BFIN_GOTOFF17M4", "R_BFIN_GOTOFFHI", "R_BFIN_GOTOFFLO",
+#if 0
+ [200] "R_BFIN_GNU_VTINHERIT", "R_BFIN_GNU_VTENTRY"
+#endif
+};
diff --git a/ldso/ldso/bfin/dl-inlines.h b/ldso/ldso/bfin/dl-inlines.h
new file mode 100644
index 000000000..0a97065de
--- /dev/null
+++ b/ldso/ldso/bfin/dl-inlines.h
@@ -0,0 +1,543 @@
+ /* Copyright (C) 2003, 2004 Red Hat, Inc.
+ Contributed by Alexandre Oliva <aoliva@redhat.com>
+
+This file is part of uClibc.
+
+uClibc is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as
+published by the Free Software Foundation; either version 2.1 of the
+License, or (at your option) any later version.
+
+uClibc is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Library General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with uClibc; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
+USA. */
+
+#include <bits/bfin_sram.h>
+
+#ifndef _dl_assert
+# define _dl_assert(expr)
+#endif
+
+/* Initialize a DL_LOADADDR_TYPE given a got pointer and a complete
+   load map.  LOADADDR is filled in from DL_BOOT_GOT_POINTER (the
+   module's GOT/FDPIC register value) and MAP (the kernel-supplied
+   FDPIC load map).  A loadmap with an unrecognized version or no
+   segments is unusable, so either condition aborts the loader.  */
+inline static void
+__dl_init_loadaddr_map (struct elf32_fdpic_loadaddr *loadaddr, Elf32_Addr dl_boot_got_pointer,
+ struct elf32_fdpic_loadmap *map)
+{
+ /* Only loadmap version 0 is understood by this loader.  */
+ if (map->version != 0)
+ {
+ SEND_EARLY_STDERR ("Invalid loadmap version number\n");
+ _dl_exit(-1);
+ }
+ if (map->nsegs == 0)
+ {
+ SEND_EARLY_STDERR ("Invalid segment count in loadmap\n");
+ _dl_exit(-1);
+ }
+ loadaddr->got_value = dl_boot_got_pointer;
+ loadaddr->map = map;
+}
+
+/* Figure out how many LOAD segments there are in the given headers,
+   and allocate a block for the load map big enough for them.
+   got_value will be properly initialized later on, with INIT_GOT.
+   PPNT/PCNT are the program header table and its entry count.
+   Returns the number of PT_LOAD segments; map->nsegs starts at 0 and
+   is bumped by __dl_init_loadaddr_hdr as each segment is mapped.
+   Allocation failure is fatal.  */
+inline static int
+__dl_init_loadaddr (struct elf32_fdpic_loadaddr *loadaddr, Elf32_Phdr *ppnt,
+ int pcnt)
+{
+ int count = 0, i;
+ size_t size;
+
+ for (i = 0; i < pcnt; i++)
+ if (ppnt[i].p_type == PT_LOAD)
+ count++;
+
+ loadaddr->got_value = 0;
+
+ /* NOTE(review): assumes struct elf32_fdpic_loadmap's segs[] array
+    contributes no elements to sizeof (flexible-array-style layout)
+    — confirm against the loadmap declaration.  */
+ size = sizeof (struct elf32_fdpic_loadmap)
+ + sizeof (struct elf32_fdpic_loadseg) * count;
+ loadaddr->map = _dl_malloc (size);
+ if (! loadaddr->map)
+ _dl_exit (-1);
+
+ loadaddr->map->version = 0;
+ loadaddr->map->nsegs = 0;
+
+ return count;
+}
+
+/* Incrementally initialize a load map: append one segment record for
+   program header PHDR, mapped at ADDR, to LOADADDR's map.  MAXSEGS is
+   the capacity computed by __dl_init_loadaddr; exceeding it means the
+   caller mapped more segments than were counted, which is fatal.  */
+inline static void
+__dl_init_loadaddr_hdr (struct elf32_fdpic_loadaddr loadaddr, void *addr,
+ Elf32_Phdr *phdr, int maxsegs)
+{
+ struct elf32_fdpic_loadseg *segdata;
+
+ if (loadaddr.map->nsegs == maxsegs)
+ _dl_exit (-1);
+
+ segdata = &loadaddr.map->segs[loadaddr.map->nsegs++];
+ segdata->addr = (Elf32_Addr) addr;
+ segdata->p_vaddr = phdr->p_vaddr;
+ segdata->p_memsz = phdr->p_memsz;
+
+#if defined (__SUPPORT_LD_DEBUG__)
+ {
+ extern char *_dl_debug;
+ extern int _dl_debug_file;
+ if (_dl_debug)
+ _dl_dprintf(_dl_debug_file, "%i: mapped %x at %x, size %x\n",
+ loadaddr.map->nsegs-1,
+ segdata->p_vaddr, segdata->addr, segdata->p_memsz);
+ }
+#endif
+}
+
+inline static void __dl_loadaddr_unmap
+(struct elf32_fdpic_loadaddr loadaddr, struct funcdesc_ht *funcdesc_ht);
+
+/* Return nonzero iff P lies inside one of the segments recorded in
+   LOADADDR's load map, zero otherwise.  */
+inline static int
+__dl_addr_in_loadaddr (void *p, struct elf32_fdpic_loadaddr loadaddr)
+{
+ int i;
+
+ for (i = 0; i < loadaddr.map->nsegs; i++)
+ {
+ char *base = (char *) loadaddr.map->segs[i].addr;
+
+ if ((char *) p >= base
+ && (char *) p < base + loadaddr.map->segs[i].p_memsz)
+ return 1;
+ }
+
+ return 0;
+}
+
+inline static void * _dl_funcdesc_for (void *entry_point, void *got_value);
+
+/* The hashcode handling code below is heavily inspired in libiberty's
+ hashtab code, but with most adaptation points and support for
+ deleting elements removed.
+
+ Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+ Contributed by Vladimir Makarov (vmakarov@cygnus.com). */
+
+/* Return the smallest prime in the table below that is >= N.  If N
+   is larger than every table entry, return the largest entry instead
+   of reading past the end of the table (the original code's binary
+   search would converge on the one-past-the-end pointer and
+   dereference it in that case — the disabled abort below hints the
+   authors knew the table could be exhausted).  */
+inline static unsigned long
+higher_prime_number (unsigned long n)
+{
+ /* These are primes that are near, but slightly smaller than, a
+ power of two. */
+ static const unsigned long primes[] = {
+ (unsigned long) 7,
+ (unsigned long) 13,
+ (unsigned long) 31,
+ (unsigned long) 61,
+ (unsigned long) 127,
+ (unsigned long) 251,
+ (unsigned long) 509,
+ (unsigned long) 1021,
+ (unsigned long) 2039,
+ (unsigned long) 4093,
+ (unsigned long) 8191,
+ (unsigned long) 16381,
+ (unsigned long) 32749,
+ (unsigned long) 65521,
+ (unsigned long) 131071,
+ (unsigned long) 262139,
+ (unsigned long) 524287,
+ (unsigned long) 1048573,
+ (unsigned long) 2097143,
+ (unsigned long) 4194301,
+ (unsigned long) 8388593,
+ (unsigned long) 16777213,
+ (unsigned long) 33554393,
+ (unsigned long) 67108859,
+ (unsigned long) 134217689,
+ (unsigned long) 268435399,
+ (unsigned long) 536870909,
+ (unsigned long) 1073741789,
+ (unsigned long) 2147483647,
+ /* 4294967291L */
+ ((unsigned long) 2147483647) + ((unsigned long) 2147483644),
+ };
+
+ const unsigned long *low = &primes[0];
+ const unsigned long *high = &primes[sizeof(primes) / sizeof(primes[0])];
+
+ /* Standard lower-bound binary search.  */
+ while (low != high)
+ {
+ const unsigned long *mid = low + (high - low) / 2;
+ if (n > *mid)
+ low = mid + 1;
+ else
+ high = mid;
+ }
+
+ /* N exceeds every prime we know: clamp to the largest one rather
+ than dereferencing the end-of-array pointer.  */
+ if (low == &primes[sizeof(primes) / sizeof(primes[0])])
+ return low[-1];
+
+ return *low;
+}
+
+/* Open-addressing hash table mapping code entry points to their
+   canonical struct funcdesc_value, one table per loaded module.  */
+struct funcdesc_ht
+{
+ /* Table itself. */
+ struct funcdesc_value **entries;
+
+ /* Current size (in entries) of the hash table */
+ size_t size;
+
+ /* Current number of elements. */
+ size_t n_elements;
+};
+
+/* Hash a pointer.  Function descriptors are 8-byte aligned (see
+   struct funcdesc_value), so the low 3 bits carry no information and
+   are shifted out.  */
+inline static int
+hash_pointer (const void *p)
+{
+ return (int) ((long)p >> 3);
+}
+
+/* Allocate and initialize an empty function-descriptor hash table
+   with a small prime size.  Returns NULL on allocation failure (the
+   partially-built header is deliberately not freed: this allocator
+   does not support safe freeing here — see htab_expand's comment).  */
+inline static struct funcdesc_ht *
+htab_create (void)
+{
+ struct funcdesc_ht *ht = _dl_malloc (sizeof (struct funcdesc_ht));
+
+ if (! ht)
+ return NULL;
+ ht->size = 3;
+ /* The entries array holds struct funcdesc_value pointers.  The
+ original spelled the element type "struct funcdesc_ht_value *",
+ a type that exists nowhere in this port; it compiled only
+ because all object pointers have the same size.  */
+ ht->entries = _dl_malloc (sizeof (struct funcdesc_value *) * ht->size);
+ if (! ht->entries)
+ return NULL;
+
+ ht->n_elements = 0;
+
+ _dl_memset (ht->entries, 0, sizeof (struct funcdesc_value *) * ht->size);
+
+ return ht;
+}
+
+/* Release every descriptor in HTAB, then the slot array and the table
+   header.  Only reached from _dl_loadaddr_unmap, where calling
+   _dl_free() is known to be safe — see the discussion below.  */
+inline static void
+htab_delete (struct funcdesc_ht *htab)
+{
+ size_t idx = htab->size;
+
+ while (idx-- > 0)
+ if (htab->entries[idx] != NULL)
+ _dl_free (htab->entries[idx]);
+
+ _dl_free (htab->entries);
+ _dl_free (htab);
+}
+
+/* Similar to htab_find_slot, but without several unwanted side effects:
+   - Does not call htab->eq_f when it finds an existing entry.
+   - Does not change the count of elements/searches/collisions in the
+   hash table.
+   This function also assumes there are no deleted entries in the table.
+   HASH is the hash value for the element to be inserted.
+   Termination relies on the table size being prime and having at
+   least one free slot (guaranteed by htab_expand's 50% occupancy).  */
+inline static struct funcdesc_value **
+find_empty_slot_for_expand (struct funcdesc_ht *htab, int hash)
+{
+ size_t size = htab->size;
+ unsigned int index = hash % size;
+ struct funcdesc_value **slot = htab->entries + index;
+ int hash2;
+
+ if (! *slot)
+ return slot;
+
+ /* Secondary (double-hashing) probe step, in [1, size-2]; coprime
+ with the prime table size so every slot is eventually visited.  */
+ hash2 = 1 + hash % (size - 2);
+ for (;;)
+ {
+ index += hash2;
+ if (index >= size)
+ index -= size;
+
+ slot = htab->entries + index;
+ if (! *slot)
+ return slot;
+ }
+}
+
+/* The following function changes size of memory allocated for the
+   entries and repeatedly inserts the table elements. The occupancy
+   of the table after the call will be about 50%. Naturally the hash
+   table must already exist. Remember also that the place of the
+   table entries is changed. If memory allocation failures are allowed,
+   this function will return zero, indicating that the table could not be
+   expanded. If all goes well, it will return a non-zero value. */
+
+inline static int
+htab_expand (struct funcdesc_ht *htab)
+{
+ struct funcdesc_value **oentries;
+ struct funcdesc_value **olimit;
+ struct funcdesc_value **p;
+ struct funcdesc_value **nentries;
+ size_t nsize;
+
+ oentries = htab->entries;
+ olimit = oentries + htab->size;
+
+ /* Resize only when table after removal of unused elements is either
+ too full or too empty. */
+ if (htab->n_elements * 2 > htab->size)
+ nsize = higher_prime_number (htab->n_elements * 2);
+ else
+ nsize = htab->size;
+
+ nentries = _dl_malloc (sizeof (struct funcdesc_value *) * nsize);
+ /* Bail out BEFORE touching the new block: the original code called
+ _dl_memset on a possibly-NULL pointer and only then checked it.  */
+ if (nentries == NULL)
+ return 0;
+ _dl_memset (nentries, 0, sizeof (struct funcdesc_value *) * nsize);
+ htab->entries = nentries;
+ htab->size = nsize;
+
+ /* Rehash every occupied slot of the old array into the new one.  */
+ p = oentries;
+ do
+ {
+ if (*p)
+ *find_empty_slot_for_expand (htab, hash_pointer ((*p)->entry_point))
+ = *p;
+
+ p++;
+ }
+ while (p < olimit);
+
+#if 0 /* We can't tell whether this was allocated by the _dl_malloc()
+ built into ld.so or malloc() in the main executable or libc,
+ and calling free() for something that wasn't malloc()ed could
+ do Very Bad Things (TM). Take the conservative approach
+ here, potentially wasting as much memory as actually used by
+ the hash table, even if multiple growths occur. That's not
+ so bad as to require some overengineered solution that would
+ enable us to keep track of how it was allocated. */
+ _dl_free (oentries);
+#endif
+ return 1;
+}
+
+/* This function searches for a hash table slot containing an entry
+   equal to the given element. To delete an entry, call this with
+   INSERT = 0, then call htab_clear_slot on the slot returned (possibly
+   after doing some checks). To insert an entry, call this with
+   INSERT = 1, then write the value you want into the returned slot.
+   When inserting an entry, NULL may be returned if memory allocation
+   fails.
+   NOTE: on an insert that lands on an empty slot, n_elements is
+   incremented before the caller has stored anything — the caller is
+   expected to fill the slot unconditionally.  */
+inline static struct funcdesc_value **
+htab_find_slot (struct funcdesc_ht *htab, void *ptr, int insert)
+{
+ unsigned int index;
+ int hash, hash2;
+ size_t size;
+ struct funcdesc_value **entry;
+
+ /* Grow when occupancy reaches 3/4; propagate expansion failure.  */
+ if (htab->size * 3 <= htab->n_elements * 4
+ && htab_expand (htab) == 0)
+ return NULL;
+
+ hash = hash_pointer (ptr);
+
+ size = htab->size;
+ index = hash % size;
+
+ entry = &htab->entries[index];
+ if (!*entry)
+ goto empty_entry;
+ else if ((*entry)->entry_point == ptr)
+ return entry;
+
+ /* Double-hashing probe, same scheme as find_empty_slot_for_expand.  */
+ hash2 = 1 + hash % (size - 2);
+ for (;;)
+ {
+ index += hash2;
+ if (index >= size)
+ index -= size;
+
+ entry = &htab->entries[index];
+ if (!*entry)
+ goto empty_entry;
+ else if ((*entry)->entry_point == ptr)
+ return entry;
+ }
+
+ empty_entry:
+ if (!insert)
+ return NULL;
+
+ htab->n_elements++;
+ return entry;
+}
+
+/* Return the canonical function descriptor for ENTRY_POINT in the
+   module whose GOT is GOT_VALUE, creating it (and the module's hash
+   table) on first use.  Returns (void*)-1 on allocation failure.
+   NOTE(review): declared "inline static" earlier in this file but
+   defined with external linkage here — confirm which is intended.  */
+void *
+_dl_funcdesc_for (void *entry_point, void *got_value)
+{
+ /* GOT slot 2 holds the module's elf_resolve pointer (see INIT_GOT).  */
+ struct elf_resolve *tpnt = ((void**)got_value)[2];
+ struct funcdesc_ht *ht = tpnt->funcdesc_ht;
+ struct funcdesc_value **entry;
+
+ _dl_assert (got_value == tpnt->loadaddr.got_value);
+
+ if (! ht)
+ {
+ ht = htab_create ();
+ if (! ht)
+ return (void*)-1;
+ tpnt->funcdesc_ht = ht;
+ }
+
+ entry = htab_find_slot (ht, entry_point, 1);
+ /* htab_find_slot returns NULL when the table needed to grow and the
+ reallocation failed; the original dereferenced it regardless.  */
+ if (entry == NULL)
+ return (void*)-1;
+ if (*entry)
+ {
+ _dl_assert ((*entry)->entry_point == entry_point);
+ return _dl_stabilize_funcdesc (*entry);
+ }
+
+ *entry = _dl_malloc (sizeof (struct funcdesc_value));
+ /* Unchecked in the original.  On failure the slot stays NULL; the
+ element count is then one high, which only hastens expansion.  */
+ if (! *entry)
+ return (void*)-1;
+ (*entry)->entry_point = entry_point;
+ (*entry)->got_value = got_value;
+
+ return _dl_stabilize_funcdesc (*entry);
+}
+
+/* If ADDRESS is one of the lazily-created function descriptors owned
+   by some loaded module, return the code entry point it wraps;
+   otherwise return ADDRESS unchanged.  Note that ADDRESS is reused
+   below to hold the hash-table slot pointer during the lookup.  */
+inline static void const *
+_dl_lookup_address (void const *address)
+{
+ struct elf_resolve *rpnt;
+ struct funcdesc_value const *fd;
+
+ /* Make sure we don't make assumptions about its alignment. */
+ asm ("" : "+r" (address));
+
+ /* Descriptors are 8-byte aligned; anything else can't be one.  */
+ if ((Elf32_Addr)address & 7)
+ /* It's not a function descriptor. */
+ return address;
+
+ fd = (struct funcdesc_value const *)address;
+
+ for (rpnt = _dl_loaded_modules; rpnt; rpnt = rpnt->next)
+ {
+ if (! rpnt->funcdesc_ht)
+ continue;
+
+ /* Only the owning module (matching GOT value) can hold FD.  */
+ if (fd->got_value != rpnt->loadaddr.got_value)
+ continue;
+
+ address = htab_find_slot (rpnt->funcdesc_ht, (void*)fd->entry_point, 0);
+
+ if (address && *(struct funcdesc_value *const*)address == fd)
+ {
+ address = (*(struct funcdesc_value *const*)address)->entry_point;
+ break;
+ }
+ else
+ /* Not this module's descriptor after all; restore and keep looking.  */
+ address = fd;
+ }
+
+ return address;
+}
+
+/* Unmap every segment recorded in LOADADDR's load map, free the map,
+   and destroy FUNCDESC_HT (if the module ever created one).  Each
+   munmap is widened back to the page boundary the segment was mapped
+   from (OFFS undoes the sub-page p_vaddr offset).  */
+void
+__dl_loadaddr_unmap (struct elf32_fdpic_loadaddr loadaddr,
+ struct funcdesc_ht *funcdesc_ht)
+{
+ int i;
+
+ for (i = 0; i < loadaddr.map->nsegs; i++)
+ {
+ struct elf32_fdpic_loadseg *segdata;
+ ssize_t offs;
+ segdata = loadaddr.map->segs + i;
+ offs = (segdata->p_vaddr & ADDR_ALIGN);
+ _dl_munmap ((void*)segdata->addr - offs,
+ segdata->p_memsz + offs);
+ }
+ /* _dl_unmap is only called for dlopen()ed libraries, for which
+ calling free() is safe, or before we've completed the initial
+ relocation, in which case calling free() is probably pointless,
+ but still safe. */
+ _dl_free (loadaddr.map);
+ if (funcdesc_ht)
+ htab_delete (funcdesc_ht);
+}
+
+/* Return 1 if PPNT is a PT_LOAD segment that must be placed in
+   Blackfin L1 SRAM — either a read-only executable segment of a
+   code-in-L1 binary or a writable non-executable segment of a
+   data-in-L1 binary, per the EF_BFIN_* flags in the ELF header.
+   Return 0 for everything else.  */
+inline static int
+__dl_is_special_segment (Elf32_Ehdr *epnt,
+ Elf32_Phdr *ppnt)
+{
+ int exec_p, write_p;
+
+ if (ppnt->p_type != PT_LOAD)
+ return 0;
+
+ exec_p = (ppnt->p_flags & PF_X) != 0;
+ write_p = (ppnt->p_flags & PF_W) != 0;
+
+ /* Text destined for L1 instruction SRAM.  */
+ if ((epnt->e_flags & EF_BFIN_CODE_IN_L1) && exec_p && !write_p)
+ return 1;
+
+ /* Data destined for L1 data SRAM.  */
+ if ((epnt->e_flags & EF_BFIN_DATA_IN_L1) && write_p && !exec_p)
+ return 1;
+
+ return 0;
+}
+
+/* Map a "special" (L1 SRAM) segment and return its address, or NULL
+   on failure.  Code segments are first mmap'd from INFILE, copied
+   into L1 instruction SRAM by DMA (L1 code SRAM is not CPU-writable),
+   and the staging mapping released.  Data segments are read straight
+   into L1 data SRAM and BSS-cleared.  Returns 0 (not an error) if the
+   segment is not one of the two special kinds.  */
+inline static char *
+__dl_map_segment (Elf32_Ehdr *epnt,
+ Elf32_Phdr *ppnt,
+ int infile,
+ int flags)
+{
+ char *status, *tryaddr, *l1addr;
+ size_t size;
+
+ if ((epnt->e_flags & EF_BFIN_CODE_IN_L1)
+ && !(ppnt->p_flags & PF_W)
+ && (ppnt->p_flags & PF_X)) {
+ /* Stage the file contents in a normal anonymous-address mapping.  */
+ status = (char *) _dl_mmap
+ (tryaddr = 0,
+ size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz,
+ LXFLAGS(ppnt->p_flags),
+ flags | MAP_EXECUTABLE | MAP_DENYWRITE,
+ infile, ppnt->p_offset & OFFS_ALIGN);
+ if (_dl_mmap_check_error(status)
+ || (tryaddr && tryaddr != status))
+ return NULL;
+ l1addr = (char *) _dl_sram_alloc (ppnt->p_filesz, L1_INST_SRAM);
+ if (l1addr != NULL)
+ _dl_dma_memcpy (l1addr, status + (ppnt->p_vaddr & ADDR_ALIGN), ppnt->p_filesz);
+ /* The staging map is released whether or not the copy happened.  */
+ _dl_munmap (status, size);
+ if (l1addr == NULL)
+ return NULL;
+ return l1addr;
+ }
+
+ if ((epnt->e_flags & EF_BFIN_DATA_IN_L1)
+ && (ppnt->p_flags & PF_W)
+ && !(ppnt->p_flags & PF_X)) {
+ l1addr = (char *) _dl_sram_alloc (ppnt->p_memsz, L1_DATA_SRAM);
+ if (l1addr == NULL
+ || (_DL_PREAD (infile, l1addr, ppnt->p_filesz, ppnt->p_offset)
+ != ppnt->p_filesz))
+ return NULL;
+ /* Zero the BSS portion (memsz beyond filesz).  */
+ if (ppnt->p_filesz < ppnt->p_memsz)
+ _dl_memset (l1addr + ppnt->p_filesz, 0, ppnt->p_memsz - ppnt->p_filesz);
+ return l1addr;
+ }
+
+ return 0;
+}
diff --git a/ldso/ldso/bfin/dl-startup.h b/ldso/ldso/bfin/dl-startup.h
new file mode 100644
index 000000000..09f946e9f
--- /dev/null
+++ b/ldso/ldso/bfin/dl-startup.h
@@ -0,0 +1,155 @@
+ /* Copyright (C) 2003 Red Hat, Inc.
+ Contributed by Alexandre Oliva <aoliva@redhat.com>
+
+This file is part of uClibc.
+
+uClibc is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as
+published by the Free Software Foundation; either version 2.1 of the
+License, or (at your option) any later version.
+
+uClibc is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Library General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with uClibc; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
+USA. */
+
+/* Any assembly language/system dependent hacks needed to setup
+ * boot1.c so it will work as expected and cope with whatever platform
+ * specific weirdness is needed for this architecture.
+
+ * We override the default _dl_boot function, and replace it with a
+ * bit of asm. Then call the real _dl_boot function, which is now
+ * named _dl_boot2. */
+
+/* At program start-up, p0 contains a pointer to a
+ elf32_fdpic_loadmap that describes how the executable was loaded
+ into memory. p1 contains a pointer to the interpreter (our!)
+ loadmap, if there is an interpreter, or 0 if we're being run as an
+ executable. p2 holds a pointer to the interpreter's dynamic
+ section, if there is an interpreter, or to the executable's dynamic
+ section, otherwise. If the executable is not dynamic, gr18 is 0.
+
+ We rely on the fact that the linker adds a pointer to the
+ _GLOBAL_OFFSET_TABLE_ as the last ROFIXUP entry, and that
+ __self_reloc returns the relocated pointer to us, so that we can
+ use this value to initialize the PIC register. */
+
+/* Entry trampoline.  The CALL .Lcall obtains the runtime PC, from
+   which the load bias of the ROFIXUP table is computed; ___self_reloc
+   then relocates the loader itself and returns the relocated GOT
+   pointer (kept in P3/P5).  Control is passed to the C entry point
+   _dl_start, and finally to the program, with the loader's fini
+   funcdesc in R7 as the FDPIC ABI expects.  */
+__asm__(
+ " .text\n" \
+ " .global __dl_boot\n" \
+ " .type __dl_boot,@function\n" \
+ "__dl_boot:\n" \
+ " call .Lcall\n" \
+ ".Lcall:\n" \
+ " R4 = RETS;\n" \
+ " SP += -32;\n" \
+ " R5 = P0;\n" \
+ " R6 = P1;\n" \
+ " R7 = P2;\n" \
+ " R0.L = .Lcall;\n" \
+ " R0.H = .Lcall;\n" \
+ " R1.L = __ROFIXUP_LIST__;\n" \
+ " R1.H = __ROFIXUP_LIST__;\n" \
+ " R2.L = __ROFIXUP_END__;\n" \
+ " R2.H = __ROFIXUP_END__;\n" \
+ " R1 = R1 - R0;\n" \
+ " R1 = R1 + R4;\n" \
+ " R2 = R2 - R0;\n" \
+ " R2 = R2 + R4;\n" \
+ " R0 = P1;\n" \
+ " CC = R0 == 0;\n" \
+ " IF CC R0 = P0;\n" \
+ " CALL ___self_reloc;\n" \
+ " P3 = R0;\n" \
+ " P5 = R0;\n" \
+ " R1 = R5;\n" \
+ " R2 = R6;\n" \
+ " [SP + 12] = R7;\n" \
+ " P0 = SP;\n" \
+ " P0 += 24;\n" \
+ " [SP + 16] = P0;\n" \
+ " P0 += 8;\n" \
+ " [SP + 20] = P0;\n" \
+ " CALL __dl_start;\n" \
+ " /* Pass our FINI ptr() to the user in P1 */\n" \
+ " R7 = [P5 + __dl_fini@FUNCDESC_GOT17M4];\n" \
+ " P4 = [SP + 24];\n" \
+ " P3 = [SP + 28];\n" \
+ " P0 = R5;\n" \
+ " SP += 32;\n" \
+ " JUMP (P4);\n" \
+ " .size __dl_boot,.-__dl_boot\n"
+);
+
+/* Signature of the C bootstrap entry called from the asm trampoline
+   above; the extra leading parameters carry the GOT pointer and the
+   FDPIC load maps that other ports derive from registers.  */
+#define DL_BOOT(X) \
+static void __attribute__ ((used)) \
+_dl_start (Elf32_Addr dl_boot_got_pointer, \
+ struct elf32_fdpic_loadmap *dl_boot_progmap, \
+ struct elf32_fdpic_loadmap *dl_boot_ldsomap, \
+ Elf32_Dyn *dl_boot_ldso_dyn_pointer, \
+ struct funcdesc_value *dl_main_funcdesc, \
+ X)
+
+struct elf32_fdpic_loadmap;
+
+/*
+ * Get a pointer to the argv array. On many platforms this can be just
+ * the address of the first argument; on other platforms we need to
+ * do something a little more subtle here.
+ */
+#define GET_ARGV(ARGVP, ARGS) ARGVP = (((unsigned long*) ARGS) + 1)
+
+/*
+ * Compute the GOT address. On several platforms, we use assembly
+ * here. On FDPIC targets (this scheme originated on FR-V), there's
+ * no way to compute the GOT address, since the offset between text
+ * and data is not fixed, so we arrange for the assembly _dl_boot to
+ * pass this value as an argument to _dl_boot. */
+#define DL_BOOT_COMPUTE_GOT(got) ((got) = dl_boot_got_pointer)
+
+#define DL_BOOT_COMPUTE_DYN(dpnt, got, load_addr) \
+ ((dpnt) = dl_boot_ldso_dyn_pointer)
+
+/*
+ * Here is a macro to perform a relocation. This is only used when
+ * bootstrapping the dynamic loader. RELP is the relocation that we
+ * are performing, REL is the pointer to the address we are relocating.
+ * SYMBOL is the symbol involved in the relocation, and LOAD is the
+ * load address.  Only the two relocation types the linker emits for
+ * the loader itself are handled; anything else is fatal.
+ */
+#define PERFORM_BOOTSTRAP_RELOC(RELP,REL,SYMBOL,LOAD,SYMTAB) \
+ switch(ELF32_R_TYPE((RELP)->r_info)){ \
+ case R_BFIN_byte4_data: \
+ *(REL) += (SYMBOL); \
+ break; \
+ case R_BFIN_FUNCDESC_VALUE: \
+ { \
+ struct funcdesc_value fv = { \
+ (void*)((SYMBOL) + *(REL)), \
+ (LOAD).got_value \
+ }; \
+ *(struct funcdesc_value volatile *)(REL) = fv; \
+ break; \
+ } \
+ default: \
+ _dl_exit(1); \
+ }
+
+/*
+ * Transfer control to the user's application, once the dynamic loader
+ * is done. We return the address of the function's entry point to
+ * _dl_boot, see boot1_arch.h.  On FDPIC the "entry point" is a full
+ * function descriptor, filled in through dl_main_funcdesc.
+ */
+#define START() do { \
+ struct elf_resolve *exec_mod = _dl_loaded_modules; \
+ dl_main_funcdesc->entry_point = _dl_elf_main; \
+ while (exec_mod->libtype != elf_executable) \
+ exec_mod = exec_mod->next; \
+ dl_main_funcdesc->got_value = exec_mod->loadaddr.got_value; \
+ return; \
+} while (0)
diff --git a/ldso/ldso/bfin/dl-syscalls.h b/ldso/ldso/bfin/dl-syscalls.h
new file mode 100644
index 000000000..af0b425d3
--- /dev/null
+++ b/ldso/ldso/bfin/dl-syscalls.h
@@ -0,0 +1,208 @@
+/* Copyright (C) 2003, 2004 Red Hat, Inc.
+ Contributed by Alexandre Oliva <aoliva@redhat.com>
+
+This file is part of uClibc.
+
+uClibc is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as
+published by the Free Software Foundation; either version 2.1 of the
+License, or (at your option) any later version.
+
+uClibc is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Library General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with uClibc; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
+USA. */
+
+/* We can't use the real errno in ldso, since it has not yet
+ * been dynamically linked in. */
+#include "sys/syscall.h"
+extern int _dl_errno;
+#undef __set_errno
+#define __set_errno(X) {(_dl_errno) = (X);}
+#include <sys/mman.h>
+
+/* The code below is extracted from libc/sysdeps/linux/frv/_mmap.c */
+
+#if DYNAMIC_LOADER_IN_SIMULATOR
+#define __NR___syscall_mmap2 __NR_mmap2
+static inline _syscall6(__ptr_t, __syscall_mmap2, __ptr_t, addr,
+ size_t, len, int, prot, int, flags, int, fd, off_t, offset);
+
+/* Make sure we don't get another definition of _dl_mmap from the
+ machine-independent code. */
+#undef __NR_mmap
+#undef __NR_mmap2
+
+/* This is always 12, even on architectures where PAGE_SHIFT != 12. */
+# ifndef MMAP2_PAGE_SHIFT
+# define MMAP2_PAGE_SHIFT 12
+# endif
+
+#include <bits/uClibc_page.h> /* for PAGE_SIZE */
+inline static void *_dl_memset(void*,int,size_t);
+inline static ssize_t _dl_pread(int fd, void *buf, size_t count, off_t offset);
+
+/* Simulator-only mmap replacement: hands out pages from a crude bump
+   allocator above the stack for non-MAP_FIXED requests, and emulates
+   file-backed MAP_FIXED mappings with pread.  See the block comment
+   inside for the rationale.  */
+static __ptr_t
+_dl_mmap(__ptr_t addr, size_t len, int prot, int flags, int fd, __off_t offset)
+{
+ size_t plen = (len + PAGE_SIZE - 1) & -PAGE_SIZE;
+
+/* This is a hack to enable the dynamic loader to run within a
+ simulator that doesn't support mmap, with a number of very ugly
+ tricks. Also, it's not as useful as it sounds, since only dynamic
+ executables without DT_NEEDED dependencies can be run. AFAIK, they
+ can only be created with -pie. This trick suffices to enable the
+ dynamic loader to obtain a blank page that it maps early in the
+ bootstrap. */
+ if ((flags & MAP_FIXED) == 0)
+ {
+ void *_dl_mmap_base = 0;
+ __ptr_t *ret = 0;
+
+ /* NOTE(review): word slots [0], [1023], [177] and [771] of each
+ allocator page appear to act as self-referential canaries and a
+ next-block pointer for this ad-hoc heap; the indices look
+ arbitrary — confirm against the FR-V original this was taken
+ from before changing anything here.  */
+ if (! _dl_mmap_base)
+ {
+ void *stack;
+ asm ("mov sp, %0" : "=r" (stack));
+ _dl_mmap_base = (void *)(((long)stack + 2 * PAGE_SIZE) & -PAGE_SIZE);
+ retry:
+ if (((void **)_dl_mmap_base)[0] == _dl_mmap_base
+ && ((void **)_dl_mmap_base)[1023] == _dl_mmap_base
+ && (((void **)_dl_mmap_base)[177]
+ == ((void **)_dl_mmap_base)[771]))
+ {
+ /* Looks like an existing allocator page: walk to the end of
+ the chain, crashing deliberately if a link is corrupt.  */
+ while (((void**)_dl_mmap_base)[177])
+ {
+ _dl_mmap_base = ((void**)_dl_mmap_base)[177];
+ if (!(((void **)_dl_mmap_base)[0] == _dl_mmap_base
+ && ((void **)_dl_mmap_base)[1023] == _dl_mmap_base
+ && (((void **)_dl_mmap_base)[177]
+ == ((void**)_dl_mmap_base)[771])))
+ ((void(*)())0)();
+ }
+ }
+ else
+ {
+ /* Claim the first fully-zero page above the stack.  */
+ int i;
+ for (i = 0; i < (int)PAGE_SIZE; i++)
+ if (*(char*)(_dl_mmap_base + i))
+ break;
+ if (i != PAGE_SIZE)
+ {
+ _dl_mmap_base = (void*)((long)_dl_mmap_base + PAGE_SIZE);
+ goto retry;
+ }
+ ((void**)_dl_mmap_base)[-1] =
+ ((void**)_dl_mmap_base)[0] =
+ ((void**)_dl_mmap_base)[1023] =
+ _dl_mmap_base;
+ }
+ }
+
+ if (_dl_mmap_base)
+ {
+ if (!(((void **)_dl_mmap_base)[0] == _dl_mmap_base
+ && ((void **)_dl_mmap_base)[1023] == _dl_mmap_base
+ && (((void **)_dl_mmap_base)[177]
+ == ((void**)_dl_mmap_base)[771])))
+ ((void(*)())0)();
+ /* Carve plen bytes off this block and chain a new header after it.  */
+ ret = (__ptr_t)((char*)_dl_mmap_base + PAGE_SIZE);
+ _dl_mmap_base =
+ ((void**)_dl_mmap_base)[177] =
+ ((void**)_dl_mmap_base)[771] =
+ (char*)_dl_mmap_base + plen + PAGE_SIZE;
+ ((void**)_dl_mmap_base)[0] =
+ ((void**)_dl_mmap_base)[1023] =
+ _dl_mmap_base;
+ }
+
+ if ((flags & MAP_ANONYMOUS) != 0)
+ {
+ _dl_memset (ret, 0, plen);
+ return ret;
+ }
+
+ /* File-backed: fall through and fill the fresh block via pread.  */
+ flags |= MAP_FIXED;
+ addr = ret;
+ }
+ if (offset & ((1 << MMAP2_PAGE_SHIFT) - 1)) {
+#if 0
+ __set_errno (EINVAL);
+#endif
+ return MAP_FAILED;
+ }
+ if ((flags & MAP_FIXED) != 0)
+ {
+ if (_dl_pread(fd, addr, len, offset) != (ssize_t)len)
+ return (void*)MAP_FAILED;
+ if (plen != len)
+ _dl_memset (addr + len, 0, plen - len);
+ return addr;
+ }
+ return(__syscall_mmap2(addr, len, prot, flags, fd, (off_t) (offset >> MMAP2_PAGE_SHIFT)));
+}
+#endif
+
+#ifdef __NR_pread
+#ifdef DYNAMIC_LOADER_IN_SIMULATOR
+#include <unistd.h>
+
+#define __NR___syscall_lseek __NR_lseek
+inline static unsigned long _dl_read(int fd, const void *buf, unsigned long count);
+
+inline static _syscall3(__off_t, __syscall_lseek, int, fd, __off_t, offset,
+ int, whence);
+/* Emulate pread with lseek/read/lseek-restore for simulators lacking
+   the pread syscall.  Not atomic with respect to the file offset, but
+   nothing else is running in ld.so at this point.  */
+inline static ssize_t
+_dl_pread(int fd, void *buf, size_t count, off_t offset)
+{
+ __off_t orig = __syscall_lseek (fd, 0, SEEK_CUR);
+ ssize_t ret;
+
+ if (orig == -1)
+ return -1;
+
+ if (__syscall_lseek (fd, offset, SEEK_SET) != offset)
+ return -1;
+
+ ret = _dl_read (fd, buf, count);
+
+ /* Deliberate crash (jump to address 0) if the offset can't be
+ restored — continuing with a corrupted file position would
+ silently misload segments.  */
+ if (__syscall_lseek (fd, orig, SEEK_SET) != orig)
+ ((void(*)())0)();
+
+ return ret;
+}
+#else
+#define __NR___syscall_pread __NR_pread
+inline static _syscall5(ssize_t, __syscall_pread, int, fd, void *, buf,
+ size_t, count, off_t, offset_hi, off_t, offset_lo);
+
+/* Thin wrapper over the real pread syscall, splitting the 64-bit
+   offset into the high/low pair the kernel ABI expects.  */
+inline static ssize_t
+_dl_pread(int fd, void *buf, size_t count, off_t offset)
+{
+ return(__syscall_pread(fd,buf,count,__LONG_LONG_PAIR (offset >> 31, offset)));
+}
+#endif
+#endif
+
+/* Blackfin-specific syscalls used for the L1 SRAM segments: allocate
+   L1 memory, free it, and copy into L1 instruction SRAM via DMA
+   (which the CPU cannot write directly).  */
+#ifdef __NR_sram_alloc
+#define __NR__dl_sram_alloc __NR_sram_alloc
+inline static _syscall2(__ptr_t, _dl_sram_alloc,
+ size_t, len, unsigned long, flags);
+#endif
+
+#ifdef __NR_sram_free
+#define __NR__dl_sram_free __NR_sram_free
+inline static _syscall1(int, _dl_sram_free, __ptr_t, addr);
+#endif
+
+#ifdef __NR_dma_memcpy
+#define __NR__dl_dma_memcpy __NR_dma_memcpy
+inline static _syscall3(__ptr_t, _dl_dma_memcpy,
+ __ptr_t, dest, __ptr_t, src, size_t, len);
+#endif
+
+#define __UCLIBC_MMAP_HAS_6_ARGS__
diff --git a/ldso/ldso/bfin/dl-sysdep.h b/ldso/ldso/bfin/dl-sysdep.h
new file mode 100644
index 000000000..0376c6151
--- /dev/null
+++ b/ldso/ldso/bfin/dl-sysdep.h
@@ -0,0 +1,216 @@
+ /* Copyright (C) 2003, 2004 Red Hat, Inc.
+ Contributed by Alexandre Oliva <aoliva@redhat.com>
+ Based on ../i386/dl-sysdep.h
+
+This file is part of uClibc.
+
+uClibc is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as
+published by the Free Software Foundation; either version 2.1 of the
+License, or (at your option) any later version.
+
+uClibc is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Library General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with uClibc; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
+USA. */
+
+/*
+ * Various assembly language/system dependent hacks that are required
+ * so that we can minimize the amount of platform specific code.
+ */
+
+/*
+ * Define this if the system uses RELOCA.
+ */
+#undef ELF_USES_RELOCA
+
+/* JMPREL relocs are inside the DT_RELA table. */
+#define ELF_MACHINE_PLTREL_OVERLAP
+
+#define DL_NO_COPY_RELOCS
+
+/*
+ * Initialization sequence for a GOT. Copy the resolver function
+ * descriptor and the pointer to the elf_resolve/link_map data
+ * structure. Initialize the got_value in the module while at that.
+ */
+#define INIT_GOT(GOT_BASE,MODULE) \
+{ \
+ (MODULE)->loadaddr.got_value = (GOT_BASE); \
+ GOT_BASE[0] = ((unsigned long *)&_dl_linux_resolve)[0]; \
+ GOT_BASE[1] = ((unsigned long *)&_dl_linux_resolve)[1]; \
+ GOT_BASE[2] = (unsigned long) MODULE; \
+}
+
+/* Here we define the magic numbers that this dynamic loader should accept */
+#define MAGIC1 EM_BLACKFIN
+#undef MAGIC2
+
+/* Used for error messages */
+#define ELF_TARGET "BFIN"
+
+struct elf_resolve;
+
+struct funcdesc_value
+{
+ void *entry_point;
+ void *got_value;
+} __attribute__((__aligned__(8)));
+
+
+extern int _dl_linux_resolve(void) __attribute__((__visibility__("hidden")));
+
+/* 4KiB page alignment. Should perhaps be made dynamic using
+ getpagesize(), based on AT_PAGESZ from auxvt? */
+#define PAGE_ALIGN 0xfffff000
+#define ADDR_ALIGN 0xfff
+#define OFFS_ALIGN 0x7ffff000
+
+struct funcdesc_ht;
+
+#undef SEND_EARLY_STDERR
+#define SEND_EARLY_STDERR(S) \
+ do { \
+ static const char __attribute__((section(".text"))) __s[] = (S); \
+ const char *__p, *__scratch; \
+ asm ("call 1f;\n1:\n\t" \
+ "%1 = RETS;\n\t" \
+ "%0 = [%3 + 1b@GOT17M4];\n\t" \
+ "%1 = %1 - %0;\n\t" \
+ "%1 = %1 + %2;\n\t" \
+ : "=&d" (__scratch), "=&d" (__p) \
+ : "d" (__s), "a" (dl_boot_got_pointer) : "RETS"); \
+ SEND_STDERR (__p); \
+ { int __t; \
+ for (__t = 0; __t < 0x1000000; __t++) asm volatile (""); } \
+ } while (0)
+
+#define DL_LOADADDR_TYPE struct elf32_fdpic_loadaddr
+
+#define DL_RELOC_ADDR(LOADADDR, ADDR) \
+ (__reloc_pointer ((void*)(ADDR), (LOADADDR).map))
+
+#define DL_ADDR_TO_FUNC_PTR(ADDR, LOADADDR) \
+ ((void(*)(void)) _dl_funcdesc_for ((void*)(ADDR), (LOADADDR).got_value))
+
+#define _dl_stabilize_funcdesc(val) \
+ ({ asm ("" : "+m" (*(val))); (val); })
+
+#define DL_CALL_FUNC_AT_ADDR(ADDR, LOADADDR, SIGNATURE, ...) \
+ ({ struct funcdesc_value fd = { (void*)(ADDR), (LOADADDR).got_value }; \
+ void (*pf)(void) = (void*) _dl_stabilize_funcdesc (&fd); \
+ (* SIGNATURE pf)(__VA_ARGS__); })
+
+#define DL_INIT_LOADADDR_BOOT(LOADADDR, BASEADDR) \
+ (__dl_init_loadaddr_map (&(LOADADDR), dl_boot_got_pointer, \
+ dl_boot_ldsomap ?: dl_boot_progmap))
+
+#define DL_INIT_LOADADDR_PROG(LOADADDR, BASEADDR) \
+ (__dl_init_loadaddr_map (&(LOADADDR), 0, dl_boot_progmap))
+
+#define DL_INIT_LOADADDR_EXTRA_DECLS \
+ int dl_init_loadaddr_load_count;
+#define DL_INIT_LOADADDR(LOADADDR, BASEADDR, PHDR, PHDRCNT) \
+ (dl_init_loadaddr_load_count = \
+ __dl_init_loadaddr (&(LOADADDR), (PHDR), (PHDRCNT)))
+#define DL_INIT_LOADADDR_HDR(LOADADDR, ADDR, PHDR) \
+ (__dl_init_loadaddr_hdr ((LOADADDR), (ADDR), (PHDR), \
+ dl_init_loadaddr_load_count))
+#define DL_LOADADDR_UNMAP(LOADADDR, LEN) \
+ (__dl_loadaddr_unmap ((LOADADDR), (NULL)))
+#define DL_LIB_UNMAP(LIB, LEN) \
+ (__dl_loadaddr_unmap ((LIB)->loadaddr, (LIB)->funcdesc_ht))
+#define DL_LOADADDR_BASE(LOADADDR) \
+ ((LOADADDR).got_value)
+
+/* This is called from dladdr(), such that we map a function
+ descriptor's address to the function's entry point before trying to
+ find in which library it's defined. */
+#define DL_LOOKUP_ADDRESS(ADDRESS) (_dl_lookup_address (ADDRESS))
+
+#define DL_ADDR_IN_LOADADDR(ADDR, TPNT, TFROM) \
+ (! (TFROM) && __dl_addr_in_loadaddr ((void*)(ADDR), (TPNT)->loadaddr))
+
+/* We only support loading FDPIC independently-relocatable shared
+ libraries. It probably wouldn't be too hard to support loading
+ shared libraries that require relocation by the same amount, but we
+ don't know that they exist or would be useful, and the dynamic
+   loader code could leak the whole-library map unless we kept a
+ bit more state for DL_LOADADDR_UNMAP and DL_LIB_UNMAP, so let's
+ keep things simple for now. */
+#define DL_CHECK_LIB_TYPE(epnt, piclib, _dl_progname, libname) \
+do \
+{ \
+ if (((epnt)->e_flags & EF_BFIN_FDPIC) && ! ((epnt)->e_flags & EF_BFIN_PIC)) \
+ (piclib) = 2; \
+ else \
+ { \
+ _dl_internal_error_number = LD_ERROR_NOTDYN; \
+ _dl_dprintf(2, "%s: '%s' is not an FDPIC shared library" \
+ "\n", (_dl_progname), (libname)); \
+ _dl_close(infile); \
+ return NULL; \
+ } \
+\
+} \
+while (0)
+
+/* We want to apply all relocations in the interpreter during
+ bootstrap. Because of this, we have to skip the interpreter
+ relocations in _dl_parse_relocation_information(), see
+ elfinterp.c. */
+#define DL_SKIP_BOOTSTRAP_RELOC(SYMTAB, INDEX, STRTAB) 0
+
+#ifdef __NR_pread
+#define _DL_PREAD(FD, BUF, SIZE, OFFSET) \
+ (_dl_pread((FD), (BUF), (SIZE), (OFFSET)))
+#endif
+
+/* We want to return to dlsym() a function descriptor if the symbol
+ turns out to be a function. */
+#define DL_FIND_HASH_VALUE(TPNT, TYPE_CLASS, SYM) \
+ (((TYPE_CLASS) & ELF_RTYPE_CLASS_DLSYM) \
+ && ELF32_ST_TYPE((SYM)->st_info) == STT_FUNC \
+ ? _dl_funcdesc_for (DL_RELOC_ADDR ((TPNT)->loadaddr, (SYM)->st_value), \
+ (TPNT)->loadaddr.got_value) \
+ : DL_RELOC_ADDR ((TPNT)->loadaddr, (SYM)->st_value))
+
+#define DL_IS_SPECIAL_SEGMENT(EPNT, PPNT) \
+ __dl_is_special_segment(EPNT, PPNT)
+#define DL_MAP_SEGMENT(EPNT, PPNT, INFILE, FLAGS) \
+ __dl_map_segment (EPNT, PPNT, INFILE, FLAGS)
+
+#define DL_GET_READY_TO_RUN_EXTRA_PARMS \
+ , struct elf32_fdpic_loadmap *dl_boot_progmap, Elf32_Addr dl_boot_got_pointer
+#define DL_GET_READY_TO_RUN_EXTRA_ARGS \
+ , dl_boot_progmap, dl_boot_got_pointer
+
+
+#ifdef __USE_GNU
+# include <link.h>
+#else
+# define __USE_GNU
+# include <link.h>
+# undef __USE_GNU
+#endif
+
+#include <elf.h>
+/* Hook for processing RELATIVE relocations during bootstrap.  On this
+   FDPIC port it is deliberately a no-op (the body is compiled out with
+   #if 0): all relocations, including the interpreter's own, go through
+   _dl_parse_relocation_information() instead — see the
+   DL_SKIP_BOOTSTRAP_RELOC comment above.  */
+static inline void
+elf_machine_relative (DL_LOADADDR_TYPE load_off, const Elf32_Addr rel_addr,
+		      Elf32_Word relative_count)
+{
+#if 0
+	Elf32_Rel * rpnt = (void *) rel_addr;
+	--rpnt;
+	do {
+		Elf32_Addr *const reloc_addr = (void *) (load_off + (++rpnt)->r_offset);
+
+		*reloc_addr = DL_RELOC_ADDR (load_off, *reloc_addr);
+	} while (--relative_count);
+#endif
+}
diff --git a/ldso/ldso/bfin/elfinterp.c b/ldso/ldso/bfin/elfinterp.c
new file mode 100644
index 000000000..792c45c21
--- /dev/null
+++ b/ldso/ldso/bfin/elfinterp.c
@@ -0,0 +1,352 @@
+/* Blackfin ELF shared library loader support
+ Copyright (C) 2003, 2004 Red Hat, Inc.
+ Contributed by Alexandre Oliva <aoliva@redhat.com>
+ Lots of code copied from ../i386/elfinterp.c, so:
+ Copyright (c) 1994-2000 Eric Youngdale, Peter MacDonald,
+ David Engel, Hongjiu Lu and Mitch D'Souza
+ Copyright (C) 2001-2002, Erik Andersen
+ All rights reserved.
+
+This file is part of uClibc.
+
+uClibc is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as
+published by the Free Software Foundation; either version 2.1 of the
+License, or (at your option) any later version.
+
+uClibc is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Library General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with uClibc; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
+USA. */
+
+#include <sys/cdefs.h> /* __attribute_used__ */
+
+/* Program to load an ELF binary on a linux system, and run it.
+ References to symbols in sharable libraries can be resolved by either
+ an ELF sharable library or a linux style of shared library. */
+
+/* Disclaimer: I have never seen any AT&T source code for SVr4, nor have
+ I ever taken any courses on internals. This program was developed using
+ information available through the book "UNIX SYSTEM V RELEASE 4,
+ Programmers guide: Ansi C and Programming Support Tools", which did
+ a more than adequate job of explaining everything required to get this
+ working. */
+
+/* Lazy-binding resolver, tail-called from _dl_linux_resolve (see
+   resolve.S).  TPNT is the module whose PLT entry fired; RELOC_ENTRY
+   is the byte offset of the corresponding entry in its DT_JMPREL
+   table.  Resolves the symbol, patches the GOT function descriptor in
+   place, and returns the descriptor's address so the asm stub can
+   jump through it.  Exits on a bad reloc type or unresolved symbol.  */
+struct funcdesc_value volatile *__attribute__((__visibility__("hidden")))
+_dl_linux_resolver (struct elf_resolve *tpnt, int reloc_entry)
+{
+	int reloc_type;
+	ELF_RELOC *this_reloc;
+	char *strtab;
+	ElfW(Sym) *symtab;
+	int symtab_index;
+	char *rel_addr;
+	struct elf_resolve *new_tpnt;
+	char *new_addr;
+	struct funcdesc_value funcval;
+	struct funcdesc_value volatile *got_entry;
+	char *symname;
+
+	rel_addr = (char *)tpnt->dynamic_info[DT_JMPREL];
+
+	this_reloc = (ELF_RELOC *)(intptr_t)(rel_addr + reloc_entry);
+	reloc_type = ELF_R_TYPE(this_reloc->r_info);
+	symtab_index = ELF_R_SYM(this_reloc->r_info);
+
+	symtab = (Elf32_Sym *) tpnt->dynamic_info[DT_SYMTAB];
+	strtab = (char *) tpnt->dynamic_info[DT_STRTAB];
+	symname= strtab + symtab[symtab_index].st_name;
+
+	/* Lazy PLT relocations on FDPIC Blackfin are always
+	   R_BFIN_FUNCDESC_VALUE; anything else indicates corruption.  */
+	if (reloc_type != R_BFIN_FUNCDESC_VALUE) {
+		_dl_dprintf(2, "%s: Incorrect relocation type in jump relocations\n",
+			    _dl_progname);
+		_dl_exit(1);
+	}
+
+	/* Address of GOT entry fix up */
+	got_entry = (struct funcdesc_value *) DL_RELOC_ADDR(tpnt->loadaddr, this_reloc->r_offset);
+
+	/* Get the address to be used to fill in the GOT entry.  */
+	new_addr = _dl_find_hash_mod(symname, tpnt->symbol_scope, NULL, 0,
+				     &new_tpnt);
+	if (!new_addr) {
+		/* Retry without a scope restriction — NOTE(review):
+		   presumably a global lookup; confirm the NULL-scope
+		   semantics of _dl_find_hash_mod.  */
+		new_addr = _dl_find_hash_mod(symname, NULL, NULL, 0,
+					     &new_tpnt);
+		if (!new_addr) {
+			_dl_dprintf(2, "%s: can't resolve symbol '%s'\n",
+				    _dl_progname, symname);
+			_dl_exit(1);
+		}
+	}
+
+	/* Build the resolved function descriptor: entry point plus the
+	   defining module's GOT value.  */
+	funcval.entry_point = new_addr;
+	funcval.got_value = new_tpnt->loadaddr.got_value;
+
+#if defined (__SUPPORT_LD_DEBUG__)
+	if (_dl_debug_bindings) {
+		_dl_dprintf(_dl_debug_file, "\nresolve function: %s", symname);
+		if (_dl_debug_detail)
+			_dl_dprintf(_dl_debug_file,
+				    "\n\tpatched (%x,%x) ==> (%x,%x) @ %x\n",
+				    got_entry->entry_point, got_entry->got_value,
+				    funcval.entry_point, funcval.got_value,
+				    got_entry);
+	}
+	/* The "1 ||" forces the fixup even under nofixups debugging:
+	   the stub is about to jump through this descriptor, so it
+	   must be patched regardless.  */
+	if (1 || !_dl_debug_nofixups) {
+		*got_entry = funcval;
+	}
+#else
+	*got_entry = funcval;
+#endif
+
+	return got_entry;
+}
+
+/* Walk a relocation table and apply RELOC_FNC to every entry.
+   REL_ADDR/REL_SIZE come from the dynamic section (size in bytes,
+   converted to an entry count below).  Returns 0 on success; on a
+   handler failure prints a diagnostic, then exits for an unhandled
+   reloc type (res < 0) or returns res for an unresolved symbol.  */
+static int
+_dl_parse(struct elf_resolve *tpnt, struct dyn_elf *scope,
+	  unsigned long rel_addr, unsigned long rel_size,
+	  int (*reloc_fnc) (struct elf_resolve *tpnt, struct dyn_elf *scope,
+			    ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab))
+{
+	unsigned int i;
+	char *strtab;
+	ElfW(Sym) *symtab;
+	ELF_RELOC *rpnt;
+	int symtab_index;
+
+	/* Now parse the relocation information */
+	rpnt = (ELF_RELOC *) rel_addr;
+	rel_size = rel_size / sizeof(ELF_RELOC);
+
+	symtab = (ElfW(Sym) *) tpnt->dynamic_info[DT_SYMTAB];
+	strtab = (char *) tpnt->dynamic_info[DT_STRTAB];
+
+	for (i = 0; i < rel_size; i++, rpnt++) {
+		int res;
+
+		symtab_index = ELF_R_SYM(rpnt->r_info);
+		debug_sym(symtab,strtab,symtab_index);
+		debug_reloc(symtab,strtab,rpnt);
+
+		res = reloc_fnc (tpnt, scope, rpnt, symtab, strtab);
+
+		if (res==0) continue;
+
+		_dl_dprintf(2, "\n%s: ",_dl_progname);
+
+		if (symtab_index)
+			_dl_dprintf(2, "symbol '%s': ", strtab + symtab[symtab_index].st_name);
+
+		/* res < 0: relocation type the handler cannot process.  */
+		if (res <0) {
+			int reloc_type = ELF_R_TYPE(rpnt->r_info);
+#if defined (__SUPPORT_LD_DEBUG__)
+			_dl_dprintf(2, "can't handle reloc type %s\n ", _dl_reltypes(reloc_type));
+#else
+			_dl_dprintf(2, "can't handle reloc type %x\n", reloc_type);
+#endif
+			_dl_exit(-res);
+		} else if (res >0) {
+			/* res > 0: lookup failure; let the caller decide.  */
+			_dl_dprintf(2, "can't resolve symbol\n");
+			return res;
+		}
+	}
+	return 0;
+}
+
+/* Apply one non-lazy relocation for TPNT: resolve the referenced
+   symbol (locals resolve within this module) and patch the location
+   at RPNT->r_offset.  Returns 0 on success, -1 for an unsupported
+   relocation type; exits on an unresolvable non-weak symbol.  */
+static int
+_dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope,
+	      ELF_RELOC *rpnt, ElfW(Sym) *symtab, char *strtab)
+{
+	int reloc_type;
+	int symtab_index;
+	char *symname;
+	unsigned long reloc_value = 0, *reloc_addr;
+	/* Packed-struct alias of reloc_addr, used to access the
+	   relocated word when it is not 4-byte aligned.  */
+	struct { unsigned long v; } __attribute__((__packed__))
+		*reloc_addr_packed;
+	unsigned long symbol_addr;
+	struct elf_resolve *symbol_tpnt;
+	struct funcdesc_value funcval;
+#if defined (__SUPPORT_LD_DEBUG__)
+	unsigned long old_val;
+#endif
+
+	reloc_addr = (unsigned long *) DL_RELOC_ADDR(tpnt->loadaddr, rpnt->r_offset);
+	/* Launder the pointer through an empty asm so the compiler
+	   cannot make aliasing/alignment assumptions about the packed
+	   view versus reloc_addr.  */
+	__asm__ ("" : "=r" (reloc_addr_packed) : "0" (reloc_addr));
+	reloc_type = ELF_R_TYPE(rpnt->r_info);
+	symtab_index = ELF_R_SYM(rpnt->r_info);
+	symbol_addr = 0;
+	symname = strtab + symtab[symtab_index].st_name;
+
+	if (ELF_ST_BIND (symtab[symtab_index].st_info) == STB_LOCAL) {
+		/* Local symbols are resolved within this module.  */
+		symbol_addr = (unsigned long) DL_RELOC_ADDR(tpnt->loadaddr, symtab[symtab_index].st_value);
+		symbol_tpnt = tpnt;
+	} else {
+
+		symbol_addr = (unsigned long)
+			_dl_find_hash_mod(symname, scope, NULL, 0, &symbol_tpnt);
+
+		/*
+		 * We want to allow undefined references to weak symbols - this might
+		 * have been intentional.  We should not be linking local symbols
+		 * here, so all bases should be covered.
+		 */
+
+		if (!symbol_addr && ELF_ST_BIND(symtab[symtab_index].st_info) != STB_WEAK) {
+			_dl_dprintf (2, "%s: can't resolve symbol '%s'\n",
+				     _dl_progname, strtab + symtab[symtab_index].st_name);
+			_dl_exit (1);
+		}
+	}
+
+#if defined (__SUPPORT_LD_DEBUG__)
+	/* Snapshot the pre-relocation value for the debug trace,
+	   honoring possible misalignment.  */
+	if (_dl_debug_reloc && _dl_debug_detail)
+	{
+		if ((long)reloc_addr_packed & 3)
+			old_val = reloc_addr_packed->v;
+		else
+			old_val = *reloc_addr;
+	}
+	else
+		old_val = 0;
+#endif
+	switch (reloc_type) {
+	case R_BFIN_unused0:
+		break;
+	case R_BFIN_byte4_data:
+		/* 32-bit data reloc: add the symbol address to the
+		   addend already stored at the location.  */
+		if ((long)reloc_addr_packed & 3)
+			reloc_value = reloc_addr_packed->v += symbol_addr;
+		else
+			reloc_value = *reloc_addr += symbol_addr;
+		break;
+	case R_BFIN_FUNCDESC_VALUE:
+		funcval.entry_point = (void*)symbol_addr;
+		/* The addend of FUNCDESC_VALUE
+		   relocations referencing global
+		   symbols must be ignored, because it
+		   may hold the address of a lazy PLT
+		   entry. */
+		if (ELF_ST_BIND(symtab[symtab_index].st_info) == STB_LOCAL)
+			funcval.entry_point += *reloc_addr;
+		reloc_value = (unsigned long)funcval.entry_point;
+		if (symbol_addr)
+			funcval.got_value
+				= symbol_tpnt->loadaddr.got_value;
+		else
+			funcval.got_value = 0;
+		/* Store both words of the 8-byte descriptor from the
+		   register pair holding funcval via inline asm.  */
+		__asm__ ("%0 = %2; %1 = %H2;"
+			 : "=m" (*(struct funcdesc_value *)reloc_addr), "=m" (((long *)reloc_addr)[1])
+			 : "d" (funcval));
+		break;
+	case R_BFIN_FUNCDESC:
+		/* Replace symbol+addend with the address of a function
+		   descriptor obtained from _dl_funcdesc_for; 0 for an
+		   undefined weak symbol.  */
+		if ((long)reloc_addr_packed & 3)
+			reloc_value = reloc_addr_packed->v;
+		else
+			reloc_value = *reloc_addr;
+		if (symbol_addr)
+			reloc_value = (unsigned long)_dl_funcdesc_for
+				((char *)symbol_addr + reloc_value,
+				 symbol_tpnt->loadaddr.got_value);
+		else
+			reloc_value = 0;
+		if ((long)reloc_addr_packed & 3)
+			reloc_addr_packed->v = reloc_value;
+		else
+			*reloc_addr = reloc_value;
+		break;
+	default:
+		return -1;
+	}
+#if defined (__SUPPORT_LD_DEBUG__)
+	if (_dl_debug_reloc && _dl_debug_detail) {
+		_dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x", old_val, reloc_value, reloc_addr);
+		switch (reloc_type) {
+		case R_BFIN_FUNCDESC_VALUE:
+			_dl_dprintf(_dl_debug_file, " got %x", ((struct funcdesc_value *)reloc_value)->got_value);
+			break;
+		case R_BFIN_FUNCDESC:
+			if (! reloc_value)
+				break;
+			_dl_dprintf(_dl_debug_file, " funcdesc (%x,%x)",
+				    ((struct funcdesc_value *)reloc_value)->entry_point,
+				    ((struct funcdesc_value *)reloc_value)->got_value);
+			break;
+		}
+	}
+#endif
+
+	return 0;
+}
+
+/* Apply one lazy (PLT) relocation: relocate the function descriptor's
+   stored entry point by the module's load address and point its
+   got_value at this module's GOT, deferring real symbol resolution to
+   _dl_linux_resolver on first call.  Returns 0 on success, -1 for an
+   unsupported relocation type.  */
+static int
+_dl_do_lazy_reloc (struct elf_resolve *tpnt,
+		   struct dyn_elf *scope __attribute__((unused)),
+		   ELF_RELOC *rpnt, ElfW(Sym) *symtab __attribute__((unused)),
+		   char *strtab __attribute__((unused)))
+{
+	int reloc_type;
+	struct funcdesc_value volatile *reloc_addr;
+	struct funcdesc_value funcval;
+#if defined (__SUPPORT_LD_DEBUG__)
+	unsigned long old_val;
+#endif
+
+	reloc_addr = (struct funcdesc_value *) DL_RELOC_ADDR(tpnt->loadaddr, rpnt->r_offset);
+	reloc_type = ELF_R_TYPE(rpnt->r_info);
+
+#if defined (__SUPPORT_LD_DEBUG__)
+	old_val = (unsigned long)reloc_addr->entry_point;
+#endif
+	switch (reloc_type) {
+	case R_BFIN_unused0:
+		break;
+	case R_BFIN_FUNCDESC_VALUE:
+		funcval = *reloc_addr;
+		/* Presumably the stored entry point addresses a lazy PLT
+		   stub (cf. the FUNCDESC_VALUE addend note in
+		   _dl_do_reloc) — it only needs load-address adjustment
+		   here.  */
+		funcval.entry_point = DL_RELOC_ADDR(tpnt->loadaddr, funcval.entry_point);
+		funcval.got_value = tpnt->loadaddr.got_value;
+		*reloc_addr = funcval;
+		break;
+	default:
+		return -1;
+	}
+#if defined (__SUPPORT_LD_DEBUG__)
+	if (_dl_debug_reloc && _dl_debug_detail)
+		_dl_dprintf(_dl_debug_file, "\tpatched: %x ==> %x @ %x", old_val, reloc_addr->entry_point, reloc_addr);
+#endif
+	return 0;
+
+}
+
+/* Apply the lazy (PLT) relocations of module RPNT; no symbol scope is
+   needed since _dl_do_lazy_reloc performs no lookups.  */
+void
+_dl_parse_lazy_relocation_information
+(struct dyn_elf *rpnt, unsigned long rel_addr, unsigned long rel_size)
+{
+	_dl_parse(rpnt->dyn, NULL, rel_addr, rel_size, _dl_do_lazy_reloc);
+}
+
+/* Apply the non-lazy relocations of module RPNT, resolving symbols
+   against its symbol scope.  Returns 0 or a positive lookup-failure
+   code from _dl_parse.  */
+int
+_dl_parse_relocation_information
+(struct dyn_elf *rpnt, unsigned long rel_addr, unsigned long rel_size)
+{
+	return _dl_parse(rpnt->dyn, rpnt->dyn->symbol_scope, rel_addr, rel_size, _dl_do_reloc);
+}
+
+/* We don't have copy relocs. */
+
+/* Copy relocations do not exist on this FDPIC port (see DL_NO_COPY_RELOCS
+   in dl-sysdep.h), so this always reports success.  */
+int
+_dl_parse_copy_information
+(struct dyn_elf *rpnt __attribute__((unused)),
+ unsigned long rel_addr __attribute__((unused)),
+ unsigned long rel_size __attribute__((unused)))
+{
+	return 0;
+}
+
+#ifndef LIBDL
+# include "../../libc/sysdeps/linux/bfin/crtreloc.c"
+#endif
+
diff --git a/ldso/ldso/bfin/resolve.S b/ldso/ldso/bfin/resolve.S
new file mode 100644
index 000000000..ae7f4a4c5
--- /dev/null
+++ b/ldso/ldso/bfin/resolve.S
@@ -0,0 +1,77 @@
+ /* Copyright (C) 2003 Red Hat, Inc.
+ Contributed by Alexandre Oliva <aoliva@redhat.com>
+
+This file is part of uClibc.
+
+uClibc is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as
+published by the Free Software Foundation; either version 2.1 of the
+License, or (at your option) any later version.
+
+uClibc is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Library General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with uClibc; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
+USA. */
+
+	/* The function below is tail-called by resolver stubs when a
+	   lazily-bound function is called.  It must preserve all
+	   registers that could be used to pass arguments to the actual
+	   function.  Upon _dl_linux_resolve entry, P1 holds the
+	   address of a lazy PLT entry, so [P1-4] is the lazy
+	   relocation number that we have to pass to _dl_linux_resolver.
+	   P3 holds the caller's GOT, from which we extract the
+	   elf_resolve* that _dl_linux_resolver needs as well.
+
+ _dl_linux_resolver() figures out where the jump symbol is
+ _really_ supposed to have jumped to and returns that to us.
+ Once we have that, we prepare to tail-call the actual
+ function, clean up after ourselves, restoring the original
+ arguments, then jump to the fixed up address. */
+
+ .text
+ .p2align 4
+
+ .hidden __dl_linux_resolve
+ .global __dl_linux_resolve
+ .type __dl_linux_resolve,@function
+
+__dl_linux_resolve:
+	/* Preserve arguments. */
+	[--SP] = RETS;
+	[--SP] = P0;
+	[--SP] = R0;
+	[--SP] = R1;
+	[--SP] = R2;
+	/* NOTE(review): extra 12 bytes of stack — presumably outgoing
+	   frame space/alignment; confirm against the Blackfin ABI. */
+	sp += -12;
+
+	/* Prepare to call _dl_linux_resolver. */
+	/* First argument: the elf_resolve*, placed at GOT+8 by INIT_GOT
+	   (dl-sysdep.h); P3 holds the caller's GOT here. */
+	R0 = [P3 + 8];
+	/* Second argument: the lazy relocation offset, stored 4 bytes
+	   before the PLT entry P1 points at, read as two 16-bit
+	   halves. */
+	/* Not aligned for space reasons. */
+	R1 = W[P1 + -4] (Z);
+	P1 += -2;
+	R1.H = W[P1];
+
+	P3 = R3;
+	CALL __dl_linux_resolver;
+
+	/* Move aside return value that contains the FUNCDESC_VALUE. */
+	P3 = R0;
+	P1 = [P3];	/* resolved entry point */
+	P3 = [P3 + 4];	/* callee's GOT value */
+
+	/* Restore arguments.  P1/P3 are not restored: they now carry
+	   the callee's entry point and GOT. */
+	sp += 12;
+	R2 = [SP++];
+	R1 = [SP++];
+	R0 = [SP++];
+	P0 = [SP++];
+	RETS = [SP++];
+
+	/* Now jump to the actual function. */
+	JUMP (P1);
+	.size	__dl_linux_resolve, . - __dl_linux_resolve