Diffstat (limited to 'ldso')
-rw-r--r--  ldso/include/dl-hash.h         |   2
-rw-r--r--  ldso/include/dl-syscall.h      |  10
-rw-r--r--  ldso/include/inline-hashtab.h  | 265
-rw-r--r--  ldso/include/ldsodefs.h        |   7
-rw-r--r--  ldso/include/tlsdeschtab.h     | 119
-rw-r--r--  ldso/ldso/Makefile.in          |   8
-rw-r--r--  ldso/ldso/arm/dl-sysdep.h      |   2
-rw-r--r--  ldso/ldso/arm/elfinterp.c      |   4
-rw-r--r--  ldso/ldso/avr32/dl-sysdep.h    |   3
-rw-r--r--  ldso/ldso/dl-elf.c             |  92
-rw-r--r--  ldso/ldso/dl-tls.c             |  29
-rw-r--r--  ldso/ldso/fdpic/dl-inlines.h   | 270
-rw-r--r--  ldso/ldso/ldso.c               |  18
-rw-r--r--  ldso/ldso/mips/elfinterp.c     |   4
-rw-r--r--  ldso/ldso/xtensa/dl-debug.h    |  77
-rw-r--r--  ldso/ldso/xtensa/dl-startup.h  |   2
-rw-r--r--  ldso/ldso/xtensa/dl-sysdep.h   |   9
-rw-r--r--  ldso/ldso/xtensa/dl-tlsdesc.S  |  96
-rw-r--r--  ldso/ldso/xtensa/elfinterp.c   |  51
19 files changed, 686 insertions(+), 382 deletions(-)
diff --git a/ldso/include/dl-hash.h b/ldso/include/dl-hash.h
index e1e3e3f95..18f21aefa 100644
--- a/ldso/include/dl-hash.h
+++ b/ldso/include/dl-hash.h
@@ -70,6 +70,8 @@ struct elf_resolve {
size_t l_tls_modid;
/* Nonzero if _dl_init_static_tls should be called for this module */
unsigned int l_need_tls_init:1;
+ /* Address of TLS descriptor hash table. */
+ void *l_tlsdesc_table;
#endif
ElfW(Addr) mapaddr;
diff --git a/ldso/include/dl-syscall.h b/ldso/include/dl-syscall.h
index 675b93ae8..ac4c57e4e 100644
--- a/ldso/include/dl-syscall.h
+++ b/ldso/include/dl-syscall.h
@@ -25,11 +25,7 @@ extern int _dl_errno;
/* Pull in whatever this particular arch's kernel thinks the kernel version of
* struct stat should look like. It turns out that each arch has a different
* opinion on the subject, and different kernel revs use different names... */
-#if defined(__sparc_v9__) && (__WORDSIZE == 64)
-#define kernel_stat64 stat
-#else
#define kernel_stat stat
-#endif
#include <bits/kernel_stat.h>
#include <bits/kernel_types.h>
@@ -138,14 +134,10 @@ static __always_inline _syscall0(gid_t, _dl_getegid)
#define __NR__dl_getpid __NR_getpid
static __always_inline _syscall0(gid_t, _dl_getpid)
-#if defined __NR_readlinkat && !defined __NR_readlink
+#if defined __NR_readlinkat
# define __NR__dl_readlink __NR_readlinkat
static __always_inline _syscall4(int, _dl_readlink, int, id, const char *, path,
char *, buf, size_t, bufsiz)
-#elif defined __NR_readlink
-# define __NR__dl_readlink __NR_readlink
-static __always_inline _syscall3(int, _dl_readlink, const char *, path, char *, buf,
- size_t, bufsiz)
#endif
#ifdef __NR_pread64
diff --git a/ldso/include/inline-hashtab.h b/ldso/include/inline-hashtab.h
new file mode 100644
index 000000000..4a4812027
--- /dev/null
+++ b/ldso/include/inline-hashtab.h
@@ -0,0 +1,265 @@
+/*
+ * The hashcode handling code below is heavily inspired in libiberty's
+ * hashtab code, but with most adaptation points and support for
+ * deleting elements removed.
+ *
+ * Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+ * Contributed by Vladimir Makarov (vmakarov@cygnus.com).
+ */
+
+#ifndef INLINE_HASHTAB_H
+# define INLINE_HASHTAB_H 1
+
+static __always_inline unsigned long
+higher_prime_number(unsigned long n)
+{
+ /* These are primes that are near, but slightly smaller than, a power of two. */
+ static const unsigned long primes[] = {
+ 7,
+ 13,
+ 31,
+ 61,
+ 127,
+ 251,
+ 509,
+ 1021,
+ 2039,
+ 4093,
+ 8191,
+ 16381,
+ 32749,
+ 65521,
+ 131071,
+ 262139,
+ 524287,
+ 1048573,
+ 2097143,
+ 4194301,
+ 8388593,
+ 16777213,
+ 33554393,
+ 67108859,
+ 134217689,
+ 268435399,
+ 536870909,
+ 1073741789,
+ /* 4294967291 */
+ ((unsigned long) 2147483647) + ((unsigned long) 2147483644),
+ };
+ const unsigned long *low = &primes[0];
+ const unsigned long *high = &primes[ARRAY_SIZE(primes)];
+
+ while (low != high) {
+ const unsigned long *mid = low + (high - low) / 2;
+ if (n > *mid)
+ low = mid + 1;
+ else
+ high = mid;
+ }
+
+#if 0
+ /* If we've run out of primes, abort. */
+ if (n > *low) {
+ fprintf(stderr, "Cannot find prime bigger than %lu\n", n);
+ abort();
+ }
+#endif
+
+ return *low;
+}
+
+struct funcdesc_ht
+{
+ /* Table itself */
+ void **entries;
+
+ /* Current size (in entries) of the hash table */
+ size_t size;
+
+ /* Current number of elements */
+ size_t n_elements;
+};
+
+static __always_inline struct funcdesc_ht *
+htab_create(void)
+{
+ struct funcdesc_ht *ht = _dl_malloc(sizeof(*ht));
+ size_t ent_size;
+
+ if (!ht)
+ return NULL;
+ ht->size = 3;
+ ent_size = sizeof(void *) * ht->size;
+ ht->entries = _dl_malloc(ent_size);
+ if (!ht->entries)
+ return NULL;
+
+ ht->n_elements = 0;
+ _dl_memset(ht->entries, 0, ent_size);
+
+ return ht;
+}
+
+/*
+ * This is only called from _dl_loadaddr_unmap, so it's safe to call
+ * _dl_free(). See the discussion below.
+ */
+static __always_inline void
+htab_delete(struct funcdesc_ht *htab)
+{
+ size_t i;
+
+ for (i = htab->size - 1; i >= 0; i--)
+ if (htab->entries[i])
+ _dl_free(htab->entries[i]);
+
+ _dl_free(htab->entries);
+ _dl_free(htab);
+}
+
+/*
+ * Similar to htab_find_slot, but without several unwanted side effects:
+ * - Does not call htab->eq_f when it finds an existing entry.
+ * - Does not change the count of elements/searches/collisions in the
+ * hash table.
+ * This function also assumes there are no deleted entries in the table.
+ * HASH is the hash value for the element to be inserted.
+ */
+static __always_inline void **
+find_empty_slot_for_expand(struct funcdesc_ht *htab, int hash)
+{
+ size_t size = htab->size;
+ unsigned int index = hash % size;
+ void **slot = htab->entries + index;
+ int hash2;
+
+ if (!*slot)
+ return slot;
+
+ hash2 = 1 + hash % (size - 2);
+ for (;;) {
+ index += hash2;
+ if (index >= size)
+ index -= size;
+
+ slot = htab->entries + index;
+ if (!*slot)
+ return slot;
+ }
+}
+
+/*
+ * The following function changes size of memory allocated for the
+ * entries and repeatedly inserts the table elements. The occupancy
+ * of the table after the call will be about 50%. Naturally the hash
+ * table must already exist. Remember also that the place of the
+ * table entries is changed. If memory allocation failures are allowed,
+ * this function will return zero, indicating that the table could not be
+ * expanded. If all goes well, it will return a non-zero value.
+ */
+static __always_inline int
+htab_expand(struct funcdesc_ht *htab, int (*hash_fn) (void *))
+{
+ void **oentries;
+ void **olimit;
+ void **p;
+ void **nentries;
+ size_t nsize;
+
+ oentries = htab->entries;
+ olimit = oentries + htab->size;
+
+ /*
+ * Resize only when table after removal of unused elements is either
+ * too full or too empty.
+ */
+ if (htab->n_elements * 2 > htab->size)
+ nsize = higher_prime_number(htab->n_elements * 2);
+ else
+ nsize = htab->size;
+
+ nentries = _dl_malloc(sizeof(*nentries) * nsize);
+ _dl_memset(nentries, 0, sizeof(*nentries) * nsize);
+ if (nentries == NULL)
+ return 0;
+ htab->entries = nentries;
+ htab->size = nsize;
+
+ p = oentries;
+ do {
+ if (*p)
+ *find_empty_slot_for_expand(htab, hash_fn(*p)) = *p;
+ p++;
+ } while (p < olimit);
+
+#if 0
+ /*
+ * We can't tell whether this was allocated by the _dl_malloc()
+ * built into ld.so or malloc() in the main executable or libc,
+ * and calling free() for something that wasn't malloc()ed could
+ * do Very Bad Things (TM). Take the conservative approach
+ * here, potentially wasting as much memory as actually used by
+ * the hash table, even if multiple growths occur. That's not
+ * so bad as to require some overengineered solution that would
+ * enable us to keep track of how it was allocated.
+ */
+ _dl_free(oentries);
+#endif
+ return 1;
+}
+
+/*
+ * This function searches for a hash table slot containing an entry
+ * equal to the given element. To delete an entry, call this with
+ * INSERT = 0, then call htab_clear_slot on the slot returned (possibly
+ * after doing some checks). To insert an entry, call this with
+ * INSERT = 1, then write the value you want into the returned slot.
+ * When inserting an entry, NULL may be returned if memory allocation
+ * fails.
+ */
+static __always_inline void **
+htab_find_slot(struct funcdesc_ht *htab, void *ptr, int insert,
+ int (*hash_fn)(void *), int (*eq_fn)(void *, void *))
+{
+ unsigned int index;
+ int hash, hash2;
+ size_t size;
+ void **entry;
+
+ if (htab->size * 3 <= htab->n_elements * 4 &&
+ htab_expand(htab, hash_fn) == 0)
+ return NULL;
+
+ hash = hash_fn(ptr);
+
+ size = htab->size;
+ index = hash % size;
+
+ entry = &htab->entries[index];
+ if (!*entry)
+ goto empty_entry;
+ else if (eq_fn(*entry, ptr))
+ return entry;
+
+ hash2 = 1 + hash % (size - 2);
+ for (;;) {
+ index += hash2;
+ if (index >= size)
+ index -= size;
+
+ entry = &htab->entries[index];
+ if (!*entry)
+ goto empty_entry;
+ else if (eq_fn(*entry, ptr))
+ return entry;
+ }
+
+ empty_entry:
+ if (!insert)
+ return NULL;
+
+ htab->n_elements++;
+ return entry;
+}
+
+#endif
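
For reference, the lookup in htab_find_slot() and find_empty_slot_for_expand() above is plain open addressing with double hashing: the first probe lands at hash % size, and collisions advance by a second stride of 1 + hash % (size - 2), which shares no factor with the prime table size, so the probe sequence eventually visits every slot. The standalone sketch below shows just that probe loop; it is not part of the patch, and toy_find_slot, the fixed 13-entry table and the pointer-identity comparison are illustrative stand-ins for the real hash_fn/eq_fn callbacks.

#include <stdio.h>

#define TOY_SIZE 13            /* prime, like the sizes from higher_prime_number() */
static void *toy_entries[TOY_SIZE];

/* Same probe sequence as htab_find_slot(): start at hash % size, then keep
 * adding the secondary stride 1 + hash % (size - 2), wrapping around. */
static void **toy_find_slot(int hash, void *key)
{
	unsigned int index = (unsigned int)hash % TOY_SIZE;
	int hash2 = 1 + hash % (TOY_SIZE - 2);

	for (;;) {
		void **slot = &toy_entries[index];
		if (*slot == NULL || *slot == key)
			return slot;            /* empty slot, or the key itself */
		index += hash2;
		if (index >= TOY_SIZE)
			index -= TOY_SIZE;
	}
}

int main(void)
{
	int x = 42, y = 55;             /* 42 % 13 == 55 % 13 == 3: forced collision */
	*toy_find_slot(42, &x) = &x;
	*toy_find_slot(55, &y) = &y;
	printf("x at slot %ld, y at slot %ld\n",
	       (long)(toy_find_slot(42, &x) - toy_entries),
	       (long)(toy_find_slot(55, &y) - toy_entries));
	return 0;
}

The second insertion collides at slot 3 and steps to slot 4, which is exactly how the real table spreads funcdesc and TLS-descriptor entries.
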
diff --git a/ldso/include/ldsodefs.h b/ldso/include/ldsodefs.h
index 4063d00f4..f17ac0ca0 100644
--- a/ldso/include/ldsodefs.h
+++ b/ldso/include/ldsodefs.h
@@ -62,13 +62,18 @@ extern void _dl_get_tls_static_info (size_t *sizep, size_t *alignp)
extern void _dl_allocate_static_tls (struct link_map *map)
internal_function attribute_hidden;
+extern int _dl_try_allocate_static_tls (struct link_map* map)
+ internal_function attribute_hidden;
/* Taken from glibc/elf/dl-reloc.c */
#define CHECK_STATIC_TLS(sym_map) \
do { \
- if (unlikely((sym_map)->l_tls_offset == NO_TLS_OFFSET)) \
+ if (__builtin_expect ((sym_map)->l_tls_offset == NO_TLS_OFFSET, 0)) \
_dl_allocate_static_tls (sym_map); \
} while (0)
+#define TRY_STATIC_TLS(sym_map) \
+ (__builtin_expect ((sym_map)->l_tls_offset != NO_TLS_OFFSET, 1) \
+ || _dl_try_allocate_static_tls (sym_map) == 0)
/* These are internal entry points to the two halves of _dl_allocate_tls,
only used within rtld.c itself at startup time. */
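
The new TRY_STATIC_TLS differs from CHECK_STATIC_TLS in that it reports failure instead of terminating: it is true when the module already has a static TLS offset or when _dl_try_allocate_static_tls() manages to reserve one, and false otherwise, letting the caller fall back to dynamic TLS (the xtensa R_XTENSA_TLSDESC_* cases later in this patch do exactly that). Below is a small self-contained illustration of the macro's short-circuit behaviour with the ld.so internals mocked out; NO_TLS_OFFSET and the allocator here are stand-ins, not the real definitions.

#include <stdio.h>

#define NO_TLS_OFFSET 0                       /* stand-in value for this sketch */
struct link_map_sketch { long l_tls_offset; };

static int try_allocate_calls;
static int _dl_try_allocate_static_tls(struct link_map_sketch *map)
{
	(void)map;
	try_allocate_calls++;
	return -1;                            /* pretend the static block is full */
}

#define TRY_STATIC_TLS(sym_map) \
	(__builtin_expect ((sym_map)->l_tls_offset != NO_TLS_OFFSET, 1) \
	 || _dl_try_allocate_static_tls (sym_map) == 0)

int main(void)
{
	struct link_map_sketch has_offset = { 64 }, needs_alloc = { NO_TLS_OFFSET };

	printf("%d %d\n", TRY_STATIC_TLS(&has_offset), TRY_STATIC_TLS(&needs_alloc));
	printf("allocator called %d time(s)\n", try_allocate_calls);  /* only once */
	return 0;
}

The first operand short-circuits, so the allocator is only consulted for modules that do not yet have a static offset.
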
diff --git a/ldso/include/tlsdeschtab.h b/ldso/include/tlsdeschtab.h
new file mode 100644
index 000000000..86baea148
--- /dev/null
+++ b/ldso/include/tlsdeschtab.h
@@ -0,0 +1,119 @@
+/* Hash table for TLS descriptors.
+ Copyright (C) 2005-2013 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Alexandre Oliva <aoliva@redhat.com>
+
+ uClibc port by Baruch Siach <baruch@tkos.co.il>
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef TLSDESCHTAB_H
+# define TLSDESCHTAB_H 1
+
+# ifdef SHARED
+
+# include <inline-hashtab.h>
+
+inline static int
+hash_tlsdesc (void *p)
+{
+ struct tlsdesc_dynamic_arg *td = p;
+
+ /* We know all entries are for the same module, so ti_offset is the
+ only distinguishing entry. */
+ return td->tlsinfo.ti_offset;
+}
+
+inline static int
+eq_tlsdesc (void *p, void *q)
+{
+ struct tlsdesc_dynamic_arg *tdp = p, *tdq = q;
+
+ return tdp->tlsinfo.ti_offset == tdq->tlsinfo.ti_offset;
+}
+
+inline static int
+map_generation (struct link_map *map)
+{
+ size_t idx = map->l_tls_modid;
+ struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);
+
+ /* Find the place in the dtv slotinfo list. */
+ do
+ {
+ /* Does it fit in the array of this list element? */
+ if (idx < listp->len)
+ {
+ /* We should never get here for a module in static TLS, so
+ we can assume that, if the generation count is zero, we
+ still haven't determined the generation count for this
+ module. */
+ if (listp->slotinfo[idx].gen)
+ return listp->slotinfo[idx].gen;
+ else
+ break;
+ }
+ idx -= listp->len;
+ listp = listp->next;
+ }
+ while (listp != NULL);
+
+ /* If we get to this point, the module still hasn't been assigned an
+ entry in the dtv slotinfo data structures, and it will when we're
+ done with relocations. At that point, the module will get a
+ generation number that is one past the current generation, so
+ return exactly that. */
+ return GL(dl_tls_generation) + 1;
+}
+
+void *
+internal_function
+_dl_make_tlsdesc_dynamic (struct link_map *map, size_t ti_offset)
+{
+ struct funcdesc_ht *ht;
+ void **entry;
+ struct tlsdesc_dynamic_arg *td, test;
+
+ ht = map->l_tlsdesc_table;
+ if (! ht)
+ {
+ ht = htab_create ();
+ if (! ht)
+ return 0;
+ map->l_tlsdesc_table = ht;
+ }
+
+ test.tlsinfo.ti_module = map->l_tls_modid;
+ test.tlsinfo.ti_offset = ti_offset;
+ entry = htab_find_slot (ht, &test, 1, hash_tlsdesc, eq_tlsdesc);
+ if (*entry)
+ {
+ td = *entry;
+ return td;
+ }
+
+ *entry = td = _dl_malloc (sizeof (struct tlsdesc_dynamic_arg));
+ /* This may be higher than the map's generation, but it doesn't
+ matter much. Worst case, we'll have one extra DTV update per
+ thread. */
+ td->gen_count = map_generation (map);
+ td->tlsinfo = test.tlsinfo;
+
+ return td;
+}
+
+# endif /* SHARED */
+
+#endif
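
_dl_make_tlsdesc_dynamic() keeps one descriptor per (module, ti_offset) pair in the module's l_tlsdesc_table, so every TLSDESC relocation against the same thread-local variable hands the resolver the same argument block. That caching contract is sketched below with a trivial array standing in for the hash table; the names are toy names only, and the real structure is struct tlsdesc_dynamic_arg from the arch's tlsdesc.h.

#include <stddef.h>
#include <stdio.h>

struct toy_desc { size_t ti_offset; size_t gen_count; };

static struct toy_desc cache[16];
static size_t cache_used;

/* First request for a given offset allocates a descriptor; later requests for
 * the same offset return the same pointer, as htab_find_slot() does above. */
static struct toy_desc *toy_make_tlsdesc(size_t ti_offset)
{
	size_t i;
	for (i = 0; i < cache_used; i++)
		if (cache[i].ti_offset == ti_offset)
			return &cache[i];            /* cache hit: reuse descriptor */
	cache[cache_used].ti_offset = ti_offset;
	cache[cache_used].gen_count = 1;             /* stands in for map_generation() */
	return &cache[cache_used++];
}

int main(void)
{
	struct toy_desc *a = toy_make_tlsdesc(0x10);
	struct toy_desc *b = toy_make_tlsdesc(0x10);
	printf("same descriptor: %d\n", a == b);     /* prints 1 */
	return 0;
}
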
diff --git a/ldso/ldso/Makefile.in b/ldso/ldso/Makefile.in
index e0d0a097d..56f4a4556 100644
--- a/ldso/ldso/Makefile.in
+++ b/ldso/ldso/Makefile.in
@@ -27,6 +27,14 @@ CFLAGS-$(DODEBUG)-ldso/ldso := -O2 -g
CFLAGS-ldso.c := -DLDSO_ELFINTERP=\"$(TARGET_ARCH)/elfinterp.c\"
+# avoid ld.so linking error since gcc 4.9.x: undefined reference to abort
+ifeq ($(TARGET_ARCH),xtensa)
+CFLAGS-ldso.c += -fno-delete-null-pointer-checks
+endif
+ifeq ($(TARGET_ARCH),sh)
+CFLAGS-ldso.c += -fno-delete-null-pointer-checks
+endif
+
LDFLAGS-$(UCLIBC_FORMAT_DSBT_ELF)-$(UCLIBC_LDSO_NAME).so := -Wl,--dsbt-index=1
ifneq ($(SUPPORT_LD_DEBUG),y)
LDFLAGS-$(UCLIBC_LDSO_NAME).so := $(LDFLAGS)
diff --git a/ldso/ldso/arm/dl-sysdep.h b/ldso/ldso/arm/dl-sysdep.h
index 94dc1d707..dc89710c6 100644
--- a/ldso/ldso/arm/dl-sysdep.h
+++ b/ldso/ldso/arm/dl-sysdep.h
@@ -105,7 +105,7 @@ elf_machine_dynamic (void)
return dynamic;
}
-extern void __dl_start __asm__ ("_dl_start");
+extern char __dl_start[] __asm__("_dl_start");
/* Return the run-time load address of the shared object. */
static __always_inline Elf32_Addr __attribute__ ((unused))
diff --git a/ldso/ldso/arm/elfinterp.c b/ldso/ldso/arm/elfinterp.c
index c2f52b822..2043263ec 100644
--- a/ldso/ldso/arm/elfinterp.c
+++ b/ldso/ldso/arm/elfinterp.c
@@ -69,7 +69,7 @@ unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry)
got_addr = (char **) instr_addr;
/* Get the address of the GOT entry */
- new_addr = _dl_find_hash(symname, &_dl_loaded_modules->symbol_scope,
+ new_addr = (unsigned long)_dl_find_hash(symname, &_dl_loaded_modules->symbol_scope,
tpnt, ELF_RTYPE_CLASS_PLT, NULL);
if (unlikely(!new_addr)) {
_dl_dprintf(2, "%s: can't resolve symbol '%s'\n",
@@ -203,7 +203,7 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct r_scope_elem *scope,
symname = strtab + symtab[symtab_index].st_name;
if (symtab_index) {
- symbol_addr = _dl_find_hash(symname, scope, tpnt,
+ symbol_addr = (unsigned long)_dl_find_hash(symname, scope, tpnt,
elf_machine_type_class(reloc_type), &sym_ref);
/*
diff --git a/ldso/ldso/avr32/dl-sysdep.h b/ldso/ldso/avr32/dl-sysdep.h
index 515d829d0..a42212731 100644
--- a/ldso/ldso/avr32/dl-sysdep.h
+++ b/ldso/ldso/avr32/dl-sysdep.h
@@ -63,11 +63,12 @@ elf_machine_dynamic (void)
return *got;
}
+extern char __dl_start[] __asm__("_dl_start");
+
/* Return the run-time load address of the shared object. */
static __always_inline Elf32_Addr
elf_machine_load_address (void)
{
- extern void __dl_start __asm__("_dl_start");
Elf32_Addr got_addr = (Elf32_Addr) &__dl_start;
Elf32_Addr pcrel_addr;
diff --git a/ldso/ldso/dl-elf.c b/ldso/ldso/dl-elf.c
index 56319056d..49b516390 100644
--- a/ldso/ldso/dl-elf.c
+++ b/ldso/ldso/dl-elf.c
@@ -133,56 +133,60 @@ _dl_protect_relro (struct elf_resolve *l)
* in uClibc/ldso/util/ldd.c */
static struct elf_resolve *
search_for_named_library(const char *name, unsigned rflags, const char *path_list,
- struct dyn_elf **rpnt)
+ struct dyn_elf **rpnt, const char* origin)
{
- char *path, *path_n, *mylibname;
+ char *mylibname;
struct elf_resolve *tpnt;
- int done;
+ const char *p, *pn;
+ int plen;
if (path_list==NULL)
return NULL;
- /* We need a writable copy of this string, but we don't
- * need this allocated permanently since we don't want
- * to leak memory, so use alloca to put path on the stack */
- done = _dl_strlen(path_list);
- path = alloca(done + 1);
-
/* another bit of local storage */
mylibname = alloca(2050);
- _dl_memcpy(path, path_list, done+1);
-
/* Unlike ldd.c, don't bother to eliminate double //s */
/* Replace colons with zeros in path_list */
/* : at the beginning or end of path maps to CWD */
/* :: anywhere maps CWD */
/* "" maps to CWD */
- done = 0;
- path_n = path;
- do {
- if (*path == 0) {
- *path = ':';
- done = 1;
+ for (p = path_list; p != NULL; p = pn) {
+ pn = _dl_strchr(p + 1, ':');
+ if (pn != NULL) {
+ plen = pn - p;
+ pn++;
+ } else
+ plen = _dl_strlen(p);
+
+ if (plen >= 7 && _dl_memcmp(p, "$ORIGIN", 7) == 0) {
+ int olen;
+ if (rflags && plen != 7)
+ continue;
+ if (origin == NULL)
+ continue;
+ for (olen = _dl_strlen(origin) - 1; olen >= 0 && origin[olen] != '/'; olen--)
+ ;
+ if (olen <= 0)
+ continue;
+ _dl_memcpy(&mylibname[0], origin, olen);
+ _dl_memcpy(&mylibname[olen], p + 7, plen - 7);
+ mylibname[olen + plen - 7] = 0;
+ } else if (plen != 0) {
+ _dl_memcpy(mylibname, p, plen);
+ mylibname[plen] = 0;
+ } else {
+ _dl_strcpy(mylibname, ".");
}
- if (*path == ':') {
- *path = 0;
- if (*path_n)
- _dl_strcpy(mylibname, path_n);
- else
- _dl_strcpy(mylibname, "."); /* Assume current dir if empty path */
- _dl_strcat(mylibname, "/");
- _dl_strcat(mylibname, name);
+ _dl_strcat(mylibname, "/");
+ _dl_strcat(mylibname, name);
#ifdef __LDSO_SAFE_RUNPATH__
- if (*mylibname == '/')
+ if (*mylibname == '/')
#endif
- if ((tpnt = _dl_load_elf_shared_library(rflags, rpnt, mylibname)) != NULL)
- return tpnt;
- path_n = path+1;
- }
- path++;
- } while (!done);
+ if ((tpnt = _dl_load_elf_shared_library(rflags, rpnt, mylibname)) != NULL)
+ return tpnt;
+ }
return NULL;
}
@@ -234,8 +238,10 @@ struct elf_resolve *_dl_load_shared_library(unsigned rflags, struct dyn_elf **rp
if (pnt) {
pnt += (unsigned long) tpnt->dynamic_info[DT_STRTAB];
_dl_if_debug_dprint("\tsearching RPATH='%s'\n", pnt);
- if ((tpnt1 = search_for_named_library(libname, rflags, pnt, rpnt)) != NULL)
+ if ((tpnt1 = search_for_named_library(libname, rflags, pnt, rpnt,
+ tpnt->libname)) != NULL)
return tpnt1;
+
}
#endif
@@ -243,7 +249,7 @@ struct elf_resolve *_dl_load_shared_library(unsigned rflags, struct dyn_elf **rp
/* Check in LD_{ELF_}LIBRARY_PATH, if specified and allowed */
if (_dl_library_path) {
_dl_if_debug_dprint("\tsearching LD_LIBRARY_PATH='%s'\n", _dl_library_path);
- if ((tpnt1 = search_for_named_library(libname, rflags, _dl_library_path, rpnt)) != NULL)
+ if ((tpnt1 = search_for_named_library(libname, rflags, _dl_library_path, rpnt, NULL)) != NULL)
{
return tpnt1;
}
@@ -257,9 +263,21 @@ struct elf_resolve *_dl_load_shared_library(unsigned rflags, struct dyn_elf **rp
if (pnt) {
pnt += (unsigned long) tpnt->dynamic_info[DT_STRTAB];
_dl_if_debug_dprint("\tsearching RUNPATH='%s'\n", pnt);
- if ((tpnt1 = search_for_named_library(libname, rflags, pnt, rpnt)) != NULL)
+ if ((tpnt1 = search_for_named_library(libname, rflags, pnt, rpnt, NULL)) != NULL)
return tpnt1;
}
+#ifdef __LDSO_RUNPATH_OF_EXECUTABLE__
+ /*
+ * Try the DT_RPATH of the executable itself.
+ */
+ pnt = (char *) _dl_loaded_modules->dynamic_info[DT_RPATH];
+ if (pnt) {
+ pnt += (unsigned long) _dl_loaded_modules->dynamic_info[DT_STRTAB];
+ _dl_if_debug_dprint("\tsearching exe's RPATH='%s'\n", pnt);
+ if ((tpnt1 = search_for_named_library(libname, rflags, pnt, rpnt)) != NULL)
+ return tpnt1;
+ }
+#endif
#endif
/*
@@ -291,7 +309,7 @@ struct elf_resolve *_dl_load_shared_library(unsigned rflags, struct dyn_elf **rp
/* Look for libraries wherever the shared library loader
* was installed */
_dl_if_debug_dprint("\tsearching ldso dir='%s'\n", _dl_ldsopath);
- tpnt1 = search_for_named_library(libname, rflags, _dl_ldsopath, rpnt);
+ tpnt1 = search_for_named_library(libname, rflags, _dl_ldsopath, rpnt, NULL);
if (tpnt1 != NULL)
return tpnt1;
#endif
@@ -304,7 +322,7 @@ struct elf_resolve *_dl_load_shared_library(unsigned rflags, struct dyn_elf **rp
#ifndef __LDSO_CACHE_SUPPORT__
":" UCLIBC_RUNTIME_PREFIX "usr/X11R6/lib"
#endif
- , rpnt);
+ , rpnt, NULL);
if (tpnt1 != NULL)
return tpnt1;
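
The reworked search_for_named_library() now walks the colon-separated list in place and expands a leading "$ORIGIN" in an RPATH element to the directory of the object being processed; when rflags indicates a secure (set-id) lookup only a bare "$ORIGIN" element is accepted, and a missing or unusable origin simply skips the element. Below is a standalone rendition of just the expansion step, using a hypothetical helper name and ordinary libc calls instead of the _dl_* wrappers.

#include <stdio.h>
#include <string.h>

/* Expand one RPATH element: "$ORIGIN/..." becomes "<dir of origin>/...",
 * an empty element becomes ".", anything else is copied through. */
static int expand_origin(const char *elem, const char *origin,
                         char *out, size_t outlen)
{
	if (strncmp(elem, "$ORIGIN", 7) == 0) {
		const char *slash = origin ? strrchr(origin, '/') : NULL;
		if (slash == NULL || slash == origin)
			return -1;                      /* no usable directory part */
		snprintf(out, outlen, "%.*s%s",
		         (int)(slash - origin), origin, elem + 7);
		return 0;
	}
	snprintf(out, outlen, "%s", *elem ? elem : ".");
	return 0;
}

int main(void)
{
	char buf[256];
	if (expand_origin("$ORIGIN/../lib", "/usr/bin/app", buf, sizeof(buf)) == 0)
		printf("%s\n", buf);                    /* prints /usr/bin/../lib */
	return 0;
}
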
diff --git a/ldso/ldso/dl-tls.c b/ldso/ldso/dl-tls.c
index 6679693f4..5d6d3b9d3 100644
--- a/ldso/ldso/dl-tls.c
+++ b/ldso/ldso/dl-tls.c
@@ -100,20 +100,16 @@ _dl_realloc (void * __ptr, size_t __size)
* the static TLS area already allocated for each running thread. If this
* object's TLS segment is too big to fit, we fail. If it fits,
* we set MAP->l_tls_offset and return.
- * This function intentionally does not return any value but signals error
- * directly, as static TLS should be rare and code handling it should
- * not be inlined as much as possible.
*/
-void
-internal_function __attribute_noinline__
-_dl_allocate_static_tls (struct link_map *map)
+int
+internal_function
+_dl_try_allocate_static_tls (struct link_map* map)
{
/* If the alignment requirements are too high fail. */
if (map->l_tls_align > _dl_tls_static_align)
{
fail:
- _dl_dprintf(2, "cannot allocate memory in static TLS block");
- _dl_exit(30);
+ return -1;
}
# ifdef TLS_TCB_AT_TP
@@ -169,6 +165,23 @@ fail:
}
else
map->l_need_tls_init = 1;
+
+ return 0;
+}
+
+/*
+ * This function intentionally does not return any value but signals error
+ * directly, as static TLS should be rare and code handling it should
+ * not be inlined as much as possible.
+ */
+void
+internal_function __attribute_noinline__
+_dl_allocate_static_tls (struct link_map *map)
+{
+ if (_dl_try_allocate_static_tls (map)) {
+ _dl_dprintf(2, "cannot allocate memory in static TLS block");
+ _dl_exit(30);
+ }
}
#ifdef SHARED
diff --git a/ldso/ldso/fdpic/dl-inlines.h b/ldso/ldso/fdpic/dl-inlines.h
index 14a491689..ebbd0334c 100644
--- a/ldso/ldso/fdpic/dl-inlines.h
+++ b/ldso/ldso/fdpic/dl-inlines.h
@@ -5,6 +5,8 @@
* Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
*/
+#include <inline-hashtab.h>
+
/* Initialize a DL_LOADADDR_TYPE given a got pointer and a complete load map. */
static __always_inline void
__dl_init_loadaddr_map(struct elf32_fdpic_loadaddr *loadaddr, Elf32_Addr dl_boot_got_pointer,
@@ -143,269 +145,18 @@ __dl_addr_in_loadaddr(void *p, struct elf32_fdpic_loadaddr loadaddr)
return 0;
}
-/*
- * The hashcode handling code below is heavily inspired in libiberty's
- * hashtab code, but with most adaptation points and support for
- * deleting elements removed.
- *
- * Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
- * Contributed by Vladimir Makarov (vmakarov@cygnus.com).
- */
-static __always_inline unsigned long
-higher_prime_number(unsigned long n)
-{
- /* These are primes that are near, but slightly smaller than, a power of two. */
- static const unsigned long primes[] = {
- 7,
- 13,
- 31,
- 61,
- 127,
- 251,
- 509,
- 1021,
- 2039,
- 4093,
- 8191,
- 16381,
- 32749,
- 65521,
- 131071,
- 262139,
- 524287,
- 1048573,
- 2097143,
- 4194301,
- 8388593,
- 16777213,
- 33554393,
- 67108859,
- 134217689,
- 268435399,
- 536870909,
- 1073741789,
- /* 4294967291 */
- ((unsigned long) 2147483647) + ((unsigned long) 2147483644),
- };
- const unsigned long *low = &primes[0];
- const unsigned long *high = &primes[ARRAY_SIZE(primes)];
-
- while (low != high) {
- const unsigned long *mid = low + (high - low) / 2;
- if (n > *mid)
- low = mid + 1;
- else
- high = mid;
- }
-
-#if 0
- /* If we've run out of primes, abort. */
- if (n > *low) {
- fprintf(stderr, "Cannot find prime bigger than %lu\n", n);
- abort();
- }
-#endif
-
- return *low;
-}
-
-struct funcdesc_ht
-{
- /* Table itself */
- struct funcdesc_value **entries;
-
- /* Current size (in entries) of the hash table */
- size_t size;
-
- /* Current number of elements */
- size_t n_elements;
-};
-
-static __always_inline int
-hash_pointer(const void *p)
+static int
+hash_pointer(void *p)
{
return (int) ((long)p >> 3);
}
-static __always_inline struct funcdesc_ht *
-htab_create(void)
-{
- struct funcdesc_ht *ht = _dl_malloc(sizeof(*ht));
- size_t ent_size;
-
- if (!ht)
- return NULL;
- ht->size = 3;
- ent_size = sizeof(struct funcdesc_ht_value *) * ht->size;
- ht->entries = _dl_malloc(ent_size);
- if (!ht->entries)
- return NULL;
-
- ht->n_elements = 0;
- _dl_memset(ht->entries, 0, ent_size);
-
- return ht;
-}
-
-/*
- * This is only called from _dl_loadaddr_unmap, so it's safe to call
- * _dl_free(). See the discussion below.
- */
-static __always_inline void
-htab_delete(struct funcdesc_ht *htab)
+static int
+eq_pointer(void *p, void *q)
{
- size_t i;
-
- for (i = htab->size - 1; i >= 0; i--)
- if (htab->entries[i])
- _dl_free(htab->entries[i]);
-
- _dl_free(htab->entries);
- _dl_free(htab);
-}
-
-/*
- * Similar to htab_find_slot, but without several unwanted side effects:
- * - Does not call htab->eq_f when it finds an existing entry.
- * - Does not change the count of elements/searches/collisions in the
- * hash table.
- * This function also assumes there are no deleted entries in the table.
- * HASH is the hash value for the element to be inserted.
- */
-static __always_inline struct funcdesc_value **
-find_empty_slot_for_expand(struct funcdesc_ht *htab, int hash)
-{
- size_t size = htab->size;
- unsigned int index = hash % size;
- struct funcdesc_value **slot = htab->entries + index;
- int hash2;
-
- if (!*slot)
- return slot;
-
- hash2 = 1 + hash % (size - 2);
- for (;;) {
- index += hash2;
- if (index >= size)
- index -= size;
-
- slot = htab->entries + index;
- if (!*slot)
- return slot;
- }
-}
-
-/*
- * The following function changes size of memory allocated for the
- * entries and repeatedly inserts the table elements. The occupancy
- * of the table after the call will be about 50%. Naturally the hash
- * table must already exist. Remember also that the place of the
- * table entries is changed. If memory allocation failures are allowed,
- * this function will return zero, indicating that the table could not be
- * expanded. If all goes well, it will return a non-zero value.
- */
-static __always_inline int
-htab_expand(struct funcdesc_ht *htab)
-{
- struct funcdesc_value **oentries;
- struct funcdesc_value **olimit;
- struct funcdesc_value **p;
- struct funcdesc_value **nentries;
- size_t nsize;
-
- oentries = htab->entries;
- olimit = oentries + htab->size;
-
- /*
- * Resize only when table after removal of unused elements is either
- * too full or too empty.
- */
- if (htab->n_elements * 2 > htab->size)
- nsize = higher_prime_number(htab->n_elements * 2);
- else
- nsize = htab->size;
-
- nentries = _dl_malloc(sizeof(*nentries) * nsize);
- _dl_memset(nentries, 0, sizeof(*nentries) * nsize);
- if (nentries == NULL)
- return 0;
- htab->entries = nentries;
- htab->size = nsize;
-
- p = oentries;
- do {
- if (*p)
- *find_empty_slot_for_expand(htab, hash_pointer((*p)->entry_point)) = *p;
- p++;
- } while (p < olimit);
-
-#if 0
- /*
- * We can't tell whether this was allocated by the _dl_malloc()
- * built into ld.so or malloc() in the main executable or libc,
- * and calling free() for something that wasn't malloc()ed could
- * do Very Bad Things (TM). Take the conservative approach
- * here, potentially wasting as much memory as actually used by
- * the hash table, even if multiple growths occur. That's not
- * so bad as to require some overengineered solution that would
- * enable us to keep track of how it was allocated.
- */
- _dl_free(oentries);
-#endif
- return 1;
-}
-
-/*
- * This function searches for a hash table slot containing an entry
- * equal to the given element. To delete an entry, call this with
- * INSERT = 0, then call htab_clear_slot on the slot returned (possibly
- * after doing some checks). To insert an entry, call this with
- * INSERT = 1, then write the value you want into the returned slot.
- * When inserting an entry, NULL may be returned if memory allocation
- * fails.
- */
-static __always_inline struct funcdesc_value **
-htab_find_slot(struct funcdesc_ht *htab, void *ptr, int insert)
-{
- unsigned int index;
- int hash, hash2;
- size_t size;
- struct funcdesc_value **entry;
-
- if (htab->size * 3 <= htab->n_elements * 4 &&
- htab_expand(htab) == 0)
- return NULL;
-
- hash = hash_pointer(ptr);
-
- size = htab->size;
- index = hash % size;
-
- entry = &htab->entries[index];
- if (!*entry)
- goto empty_entry;
- else if ((*entry)->entry_point == ptr)
- return entry;
-
- hash2 = 1 + hash % (size - 2);
- for (;;) {
- index += hash2;
- if (index >= size)
- index -= size;
-
- entry = &htab->entries[index];
- if (!*entry)
- goto empty_entry;
- else if ((*entry)->entry_point == ptr)
- return entry;
- }
-
- empty_entry:
- if (!insert)
- return NULL;
+ struct funcdesc_value *entry = p;
- htab->n_elements++;
- return entry;
+ return entry->entry_point == q;
}
void *
@@ -424,7 +175,7 @@ _dl_funcdesc_for (void *entry_point, void *got_value)
tpnt->funcdesc_ht = ht;
}
- entry = htab_find_slot(ht, entry_point, 1);
+ entry = htab_find_slot(ht, entry_point, 1, hash_pointer, eq_pointer);
if (*entry) {
_dl_assert((*entry)->entry_point == entry_point);
return _dl_stabilize_funcdesc(*entry);
@@ -459,7 +210,8 @@ _dl_lookup_address(void const *address)
if (fd->got_value != rpnt->loadaddr.got_value)
continue;
- address = htab_find_slot(rpnt->funcdesc_ht, (void *)fd->entry_point, 0);
+ address = htab_find_slot(rpnt->funcdesc_ht, (void *)fd->entry_point, 0,
+ hash_pointer, eq_pointer);
if (address && *(struct funcdesc_value *const*)address == fd) {
address = (*(struct funcdesc_value *const*)address)->entry_point;
diff --git a/ldso/ldso/ldso.c b/ldso/ldso/ldso.c
index 56196292b..7367f1737 100644
--- a/ldso/ldso/ldso.c
+++ b/ldso/ldso/ldso.c
@@ -403,6 +403,20 @@ static ptrdiff_t _dl_build_local_scope (struct elf_resolve **list,
return p - list;
}
+static void _dl_setup_progname(const char *argv0)
+{
+ char image[PATH_MAX];
+ ssize_t s;
+
+ s = _dl_readlink(AT_FDCWD, "/proc/self/exe", image, sizeof(image));
+ if (s > 0 && image[0] == '/') {
+ image[s] = 0;
+ _dl_progname = _dl_strdup(image);
+ } else if (argv0) {
+ _dl_progname = argv0;
+ }
+}
+
void *_dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr,
ElfW(auxv_t) auxvt[AT_EGID + 1], char **envp, char **argv
DL_GET_READY_TO_RUN_EXTRA_PARMS)
@@ -454,9 +468,7 @@ void *_dl_get_ready_to_run(struct elf_resolve *tpnt, DL_LOADADDR_TYPE load_addr,
* been fixed up by now. Still no function calls outside of this
* library, since the dynamic resolver is not yet ready.
*/
- if (argv[0]) {
- _dl_progname = argv[0];
- }
+ _dl_setup_progname(argv[0]);
#ifdef __DSBT__
_dl_ldso_dsbt = (void *)tpnt->dynamic_info[DT_DSBT_BASE_IDX];
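
The new _dl_setup_progname() prefers the absolute path read from /proc/self/exe over argv[0], which keeps error messages meaningful when the program is started through the dynamic linker or with a misleading argv[0]. An ordinary-userspace equivalent looks roughly like this; it uses the libc readlinkat() wrapper instead of the internal _dl_readlink/_dl_strdup, and setup_progname is an illustrative name.

#include <fcntl.h>      /* AT_FDCWD */
#include <stdio.h>
#include <unistd.h>

/* Return the absolute image path if /proc/self/exe resolves, else argv0. */
static const char *setup_progname(const char *argv0, char *buf, size_t buflen)
{
	ssize_t n = readlinkat(AT_FDCWD, "/proc/self/exe", buf, buflen - 1);
	if (n > 0 && buf[0] == '/') {
		buf[n] = '\0';
		return buf;
	}
	return argv0;
}

int main(int argc, char **argv)
{
	static char image[4096];
	(void)argc;
	printf("progname: %s\n", setup_progname(argv[0], image, sizeof(image)));
	return 0;
}
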
diff --git a/ldso/ldso/mips/elfinterp.c b/ldso/ldso/mips/elfinterp.c
index dfe37c526..6310c7735 100644
--- a/ldso/ldso/mips/elfinterp.c
+++ b/ldso/ldso/mips/elfinterp.c
@@ -239,12 +239,12 @@ int _dl_parse_relocation_information(struct dyn_elf *xpnt,
case R_MIPS_TLS_DTPMOD64:
case R_MIPS_TLS_DTPMOD32:
if (tls_tpnt)
- *(ElfW(Word) *)reloc_addr = tls_tpnt->l_tls_modid;
+ *(ElfW(Addr) *)reloc_addr = tls_tpnt->l_tls_modid;
break;
case R_MIPS_TLS_DTPREL64:
case R_MIPS_TLS_DTPREL32:
- *(ElfW(Word) *)reloc_addr +=
+ *(ElfW(Addr) *)reloc_addr +=
TLS_DTPREL_VALUE (symbol_addr);
break;
diff --git a/ldso/ldso/xtensa/dl-debug.h b/ldso/ldso/xtensa/dl-debug.h
index 4128d9452..18beae5ca 100644
--- a/ldso/ldso/xtensa/dl-debug.h
+++ b/ldso/ldso/xtensa/dl-debug.h
@@ -8,54 +8,31 @@
static const char * const _dl_reltypes_tab[] =
{
- "R_XTENSA_NONE",
- "R_XTENSA_32",
- "R_XTENSA_RTLD",
- "R_XTENSA_GLOB_DAT",
- "R_XTENSA_JMP_SLOT",
- "R_XTENSA_RELATIVE",
- "R_XTENSA_PLT",
- "R_XTENSA_UNUSED7",
- "R_XTENSA_OP0",
- "R_XTENSA_OP1",
- "R_XTENSA_OP2",
- "R_XTENSA_ASM_EXPAND",
- "R_XTENSA_ASM_SIMPLIFY",
- "R_XTENSA_UNUSED13",
- "R_XTENSA_UNUSED14",
- "R_XTENSA_GNU_VTINHERIT",
- "R_XTENSA_GNU_VTENTRY",
- "R_XTENSA_DIFF8",
- "R_XTENSA_DIFF16",
- "R_XTENSA_DIFF32",
- "R_XTENSA_SLOT0_OP",
- "R_XTENSA_SLOT1_OP",
- "R_XTENSA_SLOT2_OP",
- "R_XTENSA_SLOT3_OP",
- "R_XTENSA_SLOT4_OP",
- "R_XTENSA_SLOT5_OP",
- "R_XTENSA_SLOT6_OP",
- "R_XTENSA_SLOT7_OP",
- "R_XTENSA_SLOT8_OP",
- "R_XTENSA_SLOT9_OP",
- "R_XTENSA_SLOT10_OP",
- "R_XTENSA_SLOT11_OP",
- "R_XTENSA_SLOT12_OP",
- "R_XTENSA_SLOT13_OP",
- "R_XTENSA_SLOT14_OP",
- "R_XTENSA_SLOT0_ALT",
- "R_XTENSA_SLOT1_ALT",
- "R_XTENSA_SLOT2_ALT",
- "R_XTENSA_SLOT3_ALT",
- "R_XTENSA_SLOT4_ALT",
- "R_XTENSA_SLOT5_ALT",
- "R_XTENSA_SLOT6_ALT",
- "R_XTENSA_SLOT7_ALT",
- "R_XTENSA_SLOT8_ALT",
- "R_XTENSA_SLOT9_ALT",
- "R_XTENSA_SLOT10_ALT",
- "R_XTENSA_SLOT11_ALT",
- "R_XTENSA_SLOT12_ALT",
- "R_XTENSA_SLOT13_ALT",
- "R_XTENSA_SLOT14_ALT"
+ [0] "R_XTENSA_NONE", "R_XTENSA_32",
+ [2] "R_XTENSA_RTLD", "R_XTENSA_GLOB_DAT",
+ [4] "R_XTENSA_JMP_SLOT", "R_XTENSA_RELATIVE",
+ [6] "R_XTENSA_PLT", "R_XTENSA_UNUSED7",
+ [8] "R_XTENSA_OP0", "R_XTENSA_OP1",
+ [10] "R_XTENSA_OP2", "R_XTENSA_ASM_EXPAND",
+ [12] "R_XTENSA_ASM_SIMPLIFY", "R_XTENSA_UNUSED13",
+ [14] "R_XTENSA_UNUSED14", "R_XTENSA_GNU_VTINHERIT",
+ [16] "R_XTENSA_GNU_VTENTRY", "R_XTENSA_DIFF8",
+ [18] "R_XTENSA_DIFF16", "R_XTENSA_DIFF32",
+ [20] "R_XTENSA_SLOT0_OP", "R_XTENSA_SLOT1_OP",
+ [22] "R_XTENSA_SLOT2_OP", "R_XTENSA_SLOT3_OP",
+ [24] "R_XTENSA_SLOT4_OP", "R_XTENSA_SLOT5_OP",
+ [26] "R_XTENSA_SLOT6_OP", "R_XTENSA_SLOT7_OP",
+ [28] "R_XTENSA_SLOT8_OP", "R_XTENSA_SLOT9_OP",
+ [30] "R_XTENSA_SLOT10_OP", "R_XTENSA_SLOT11_OP",
+ [32] "R_XTENSA_SLOT12_OP", "R_XTENSA_SLOT13_OP",
+ [34] "R_XTENSA_SLOT14_OP", "R_XTENSA_SLOT0_ALT",
+ [36] "R_XTENSA_SLOT1_ALT", "R_XTENSA_SLOT2_ALT",
+ [38] "R_XTENSA_SLOT3_ALT", "R_XTENSA_SLOT4_ALT",
+ [40] "R_XTENSA_SLOT5_ALT", "R_XTENSA_SLOT6_ALT",
+ [42] "R_XTENSA_SLOT7_ALT", "R_XTENSA_SLOT8_ALT",
+ [44] "R_XTENSA_SLOT9_ALT", "R_XTENSA_SLOT10_ALT",
+ [46] "R_XTENSA_SLOT11_ALT", "R_XTENSA_SLOT12_ALT",
+ [48] "R_XTENSA_SLOT13_ALT", "R_XTENSA_SLOT14_ALT",
+ [50] "R_XTENSA_TLSDESC_FN", "R_XTENSA_TLSDESC_ARG",
+ [52] "R_XTENSA_TLS_TPOFF"
};
diff --git a/ldso/ldso/xtensa/dl-startup.h b/ldso/ldso/xtensa/dl-startup.h
index 0c28d5e44..8fe54a3c7 100644
--- a/ldso/ldso/xtensa/dl-startup.h
+++ b/ldso/ldso/xtensa/dl-startup.h
@@ -11,7 +11,7 @@
__asm__ (
" .text\n"
" .align 4\n"
- " .literal_position\n"
+ " .literal_position\n"
" .global _start\n"
" .type _start, @function\n"
" .hidden _start\n"
diff --git a/ldso/ldso/xtensa/dl-sysdep.h b/ldso/ldso/xtensa/dl-sysdep.h
index 5f8bcb0a8..d308237d3 100644
--- a/ldso/ldso/xtensa/dl-sysdep.h
+++ b/ldso/ldso/xtensa/dl-sysdep.h
@@ -100,10 +100,13 @@ typedef struct xtensa_got_location_struct {
struct elf_resolve;
extern unsigned long _dl_linux_resolver (struct elf_resolve *, int);
-/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry, so
- undefined references should not be allowed to define the value. */
+/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry or
+ TLS variable, so undefined references should not be allowed to define
+ the value. */
#define elf_machine_type_class(type) \
- (((type) == R_XTENSA_JMP_SLOT) * ELF_RTYPE_CLASS_PLT)
+ (((type) == R_XTENSA_JMP_SLOT || (type) == R_XTENSA_TLS_TPOFF \
+ || (type) == R_XTENSA_TLSDESC_FN || (type) == R_XTENSA_TLSDESC_ARG) \
+ * ELF_RTYPE_CLASS_PLT)
/* Return the link-time address of _DYNAMIC. */
static __always_inline Elf32_Addr
diff --git a/ldso/ldso/xtensa/dl-tlsdesc.S b/ldso/ldso/xtensa/dl-tlsdesc.S
new file mode 100644
index 000000000..a6ebc949e
--- /dev/null
+++ b/ldso/ldso/xtensa/dl-tlsdesc.S
@@ -0,0 +1,96 @@
+/* Thread-local storage handling in the ELF dynamic linker. Xtensa version.
+ Copyright (C) 2012-2013 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#include <tls.h>
+#include "tlsdesc.h"
+
+
+ .text
+ .align 4
+ .hidden _dl_tlsdesc_return
+ .global _dl_tlsdesc_return
+ .type _dl_tlsdesc_return, @function
+_dl_tlsdesc_return:
+ entry a1, 16
+ rur.threadptr a3
+ add a2, a2, a3
+ retw
+ .size _dl_tlsdesc_return, .-_dl_tlsdesc_return
+
+#ifdef SHARED
+
+
+ /* This function is used for symbols that need dynamic TLS.
+
+ The argument passed to this function points to the TLS descriptor.
+
+ The assembly code that follows is a rendition of the following
+ C code, hand-optimized a little bit.
+
+ ptrdiff_t
+ _dl_tlsdesc_dynamic(struct tlsdesc_dynamic_arg *td)
+ {
+ dtv_t *dtv = (dtv_t *)THREAD_DTV();
+ if (td->gen_count <= dtv[0].counter
+ && dtv[td->tlsinfo.ti_module].pointer.val
+ != TLS_DTV_UNALLOCATED)
+ return dtv[td->tlsinfo.ti_module].pointer.val
+ + td->tlsinfo.ti_offset - __builtin_thread_pointer();
+ return __tls_get_addr (&td->tlsinfo) - __builtin_thread_pointer();
+ }
+ */
+
+ .align 4
+ .hidden _dl_tlsdesc_dynamic
+ .global _dl_tlsdesc_dynamic
+ .type _dl_tlsdesc_dynamic, @function
+_dl_tlsdesc_dynamic:
+ entry a1, 32
+
+ /* dtv_t *dtv = (dtv_t *)THREAD_DTV(); */
+ rur.threadptr a3
+ l32i a4, a3, 0
+
+ /* if (td->gen_count <= dtv[0].counter */
+ l32i a6, a2, TLSDESC_GEN_COUNT
+ l32i a7, a4, 0
+ blt a7, a6, .Lslow
+
+ /* && dtv[td->tlsinfo.ti_module].pointer.val != TLS_DTV_UNALLOCATED) */
+ l32i a6, a2, TLSDESC_MODID
+ addx8 a6, a3, a6
+ l32i a6, a6, 0
+ beqi a6, -1, .Lslow
+
+ /* return dtv[td->tlsinfo.ti_module].pointer.val
+ + td->tlsinfo.ti_offset - __builtin_thread_pointer(); */
+ l32i a6, a2, TLSDESC_MODOFF
+ sub a2, a6, a3
+ retw
+
+ /* return __tls_get_addr (&td->tlsinfo) - __builtin_thread_pointer(); */
+.Lslow:
+ mov a10, a2
+ movi a8, __tls_get_addr
+ callx8 a8
+ sub a2, a10, a3
+ retw
+ .size _dl_tlsdesc_dynamic, .-_dl_tlsdesc_dynamic
+
+#endif /* SHARED */
diff --git a/ldso/ldso/xtensa/elfinterp.c b/ldso/ldso/xtensa/elfinterp.c
index b4cf9752d..1397e95c9 100644
--- a/ldso/ldso/xtensa/elfinterp.c
+++ b/ldso/ldso/xtensa/elfinterp.c
@@ -31,6 +31,8 @@
*/
#include "ldso.h"
+#include "dl-tls.h"
+#include "tlsdeschtab.h"
unsigned long
_dl_linux_resolver (struct elf_resolve *tpnt, int reloc_entry)
@@ -146,6 +148,9 @@ _dl_do_reloc (struct elf_resolve *tpnt, struct r_scope_elem *scope,
int reloc_type;
int symtab_index;
char *symname;
+#if defined USE_TLS && USE_TLS
+ struct elf_resolve *tls_tpnt = NULL;
+#endif
struct symbol_ref sym_ref;
ElfW(Addr) *reloc_addr;
ElfW(Addr) symbol_addr;
@@ -172,15 +177,22 @@ _dl_do_reloc (struct elf_resolve *tpnt, struct r_scope_elem *scope,
* here, so all bases should be covered.
*/
if (unlikely (!symbol_addr &&
+ ELF_ST_TYPE (sym_ref.sym->st_info) != STT_TLS &&
ELF_ST_BIND (sym_ref.sym->st_info) != STB_WEAK)) {
- _dl_dprintf (2, "%s: can't resolve symbol '%s'\n",
- _dl_progname, symname);
- _dl_exit (1);
+ return 1;
}
if (_dl_trace_prelink) {
_dl_debug_lookup (symname, tpnt, &symtab[symtab_index],
&sym_ref, elf_machine_type_class(reloc_type));
}
+#if defined USE_TLS && USE_TLS
+ tls_tpnt = sym_ref.tpnt;
+#endif
+ } else {
+ symbol_addr =symtab[symtab_index].st_value;
+#if defined USE_TLS && USE_TLS
+ tls_tpnt = tpnt;
+#endif
}
#if defined (__SUPPORT_LD_DEBUG__)
@@ -198,8 +210,8 @@ _dl_do_reloc (struct elf_resolve *tpnt, struct r_scope_elem *scope,
case R_XTENSA_RTLD:
if (rpnt->r_addend == 1) {
- /* Grab the function pointer stashed at the beginning of the
- GOT by the GOT_INIT function. */
+ /* Grab the function pointer stashed at the beginning
+ of the GOT by the GOT_INIT function. */
*reloc_addr = *(ElfW(Addr) *) tpnt->dynamic_info[DT_PLTGOT];
} else if (rpnt->r_addend == 2) {
/* Store the link map for the object. */
@@ -213,6 +225,35 @@ _dl_do_reloc (struct elf_resolve *tpnt, struct r_scope_elem *scope,
*reloc_addr += tpnt->loadaddr + rpnt->r_addend;
break;
+#if defined USE_TLS && USE_TLS
+ case R_XTENSA_TLS_TPOFF:
+ CHECK_STATIC_TLS((struct link_map *) tls_tpnt);
+ *reloc_addr = symbol_addr + tls_tpnt->l_tls_offset + rpnt->r_addend;
+ break;
+ case R_XTENSA_TLSDESC_FN:
+#ifndef SHARED
+ CHECK_STATIC_TLS((struct link_map *) tls_tpnt);
+#else
+ if (!TRY_STATIC_TLS ((struct link_map *) tls_tpnt))
+ *reloc_addr = (ElfW(Addr)) _dl_tlsdesc_dynamic;
+ else
+#endif
+ *reloc_addr = (ElfW(Addr)) _dl_tlsdesc_return;
+ break;
+ case R_XTENSA_TLSDESC_ARG:
+#ifndef SHARED
+ CHECK_STATIC_TLS((struct link_map *) tls_tpnt);
+#else
+ if (!TRY_STATIC_TLS ((struct link_map *) tls_tpnt))
+ *reloc_addr = (ElfW(Addr))
+ _dl_make_tlsdesc_dynamic((struct link_map *) tls_tpnt,
+ symbol_addr + *reloc_addr);
+ else
+#endif
+ *reloc_addr += symbol_addr + tls_tpnt->l_tls_offset;
+ break;
+#endif
+
default:
return -1; /* Calls _dl_exit(1). */
}