Diffstat (limited to 'ldso/include')
-rw-r--r--  ldso/include/dl-hash.h          2
-rw-r--r--  ldso/include/dl-syscall.h      12
-rw-r--r--  ldso/include/inline-hashtab.h 265
-rw-r--r--  ldso/include/ldsodefs.h         7
-rw-r--r--  ldso/include/tlsdeschtab.h    119
5 files changed, 394 insertions(+), 11 deletions(-)
diff --git a/ldso/include/dl-hash.h b/ldso/include/dl-hash.h
index e1e3e3f95..18f21aefa 100644
--- a/ldso/include/dl-hash.h
+++ b/ldso/include/dl-hash.h
@@ -70,6 +70,8 @@ struct elf_resolve {
size_t l_tls_modid;
/* Nonzero if _dl_init_static_tls should be called for this module */
unsigned int l_need_tls_init:1;
+ /* Address of TLS descriptor hash table. */
+ void *l_tlsdesc_table;
#endif
ElfW(Addr) mapaddr;
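The new l_tlsdesc_table pointer starts out NULL and is filled in lazily; a minimal lifecycle sketch, using only names that appear later in this diff (the relocation step that populates the table is elided):

	struct elf_resolve *tpnt = ...;	/* module being relocated */

	if (tpnt->l_tlsdesc_table == NULL)
		tpnt->l_tlsdesc_table = htab_create ();	/* inline-hashtab.h, below */
	/* ... TLSDESC relocations add entries via _dl_make_tlsdesc_dynamic ... */
	htab_delete (tpnt->l_tlsdesc_table);	/* on unload, from _dl_loadaddr_unmap */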
diff --git a/ldso/include/dl-syscall.h b/ldso/include/dl-syscall.h
index e556f7b56..0acd2ba4a 100644
--- a/ldso/include/dl-syscall.h
+++ b/ldso/include/dl-syscall.h
@@ -25,11 +25,7 @@ extern int _dl_errno;
/* Pull in whatever this particular arch's kernel thinks the kernel version of
* struct stat should look like. It turns out that each arch has a different
* opinion on the subject, and different kernel revs use different names... */
-#if defined(__sparc_v9__) && (__WORDSIZE == 64)
-#define kernel_stat64 stat
-#else
#define kernel_stat stat
-#endif
#include <bits/kernel_stat.h>
#include <bits/kernel_types.h>
@@ -55,7 +51,7 @@ extern int _dl_errno;
static __always_inline attribute_noreturn __cold void _dl_exit(int status)
{
INLINE_SYSCALL(_dl_exit, 1, status);
-#if defined __GNUC__
+#if defined __GNUC__ && __GNUC_PREREQ (4, 5)
__builtin_unreachable(); /* shut up warning: 'noreturn' function does return*/
#else
while (1);
@@ -146,14 +142,10 @@ static __always_inline _syscall0(gid_t, _dl_getegid)
#define __NR__dl_getpid __NR_getpid
static __always_inline _syscall0(gid_t, _dl_getpid)
-#if defined __NR_readlinkat && !defined __NR_readlink
+#if defined __NR_readlinkat
# define __NR__dl_readlink __NR_readlinkat
static __always_inline _syscall4(int, _dl_readlink, int, id, const char *, path,
char *, buf, size_t, bufsiz)
-#elif defined __NR_readlink
-# define __NR__dl_readlink __NR_readlink
-static __always_inline _syscall3(int, _dl_readlink, const char *, path, char *, buf,
- size_t, bufsiz)
#endif
#ifdef __NR_pread64
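Dropping the plain readlink() fallback means every caller now goes through readlinkat(), which takes a directory descriptor first; a hypothetical call site resolving a path relative to the current directory would pass AT_FDCWD (from fcntl.h):

	char buf[1024];
	int len = _dl_readlink (AT_FDCWD, "/lib/ld-uClibc.so.0", buf, sizeof (buf));

	if (len < 0)
		_dl_exit (1);	/* the wrappers report errors via _dl_errno */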
diff --git a/ldso/include/inline-hashtab.h b/ldso/include/inline-hashtab.h
new file mode 100644
index 000000000..4a4812027
--- /dev/null
+++ b/ldso/include/inline-hashtab.h
@@ -0,0 +1,265 @@
+/*
+ * The hashcode handling code below is heavily inspired by libiberty's
+ * hashtab code, but with most adaptation points and support for
+ * deleting elements removed.
+ *
+ * Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+ * Contributed by Vladimir Makarov (vmakarov@cygnus.com).
+ */
+
+#ifndef INLINE_HASHTAB_H
+# define INLINE_HASHTAB_H 1
+
+static __always_inline unsigned long
+higher_prime_number(unsigned long n)
+{
+ /* These are primes that are near, but slightly smaller than, a power of two. */
+ static const unsigned long primes[] = {
+ 7,
+ 13,
+ 31,
+ 61,
+ 127,
+ 251,
+ 509,
+ 1021,
+ 2039,
+ 4093,
+ 8191,
+ 16381,
+ 32749,
+ 65521,
+ 131071,
+ 262139,
+ 524287,
+ 1048573,
+ 2097143,
+ 4194301,
+ 8388593,
+ 16777213,
+ 33554393,
+ 67108859,
+ 134217689,
+ 268435399,
+ 536870909,
+ 1073741789,
+ /* 4294967291 */
+ ((unsigned long) 2147483647) + ((unsigned long) 2147483644),
+ };
+ const unsigned long *low = &primes[0];
+ const unsigned long *high = &primes[ARRAY_SIZE(primes)];
+
+ while (low != high) {
+ const unsigned long *mid = low + (high - low) / 2;
+ if (n > *mid)
+ low = mid + 1;
+ else
+ high = mid;
+ }
+
+#if 0
+ /* If we've run out of primes, abort. */
+ if (n > *low) {
+ fprintf(stderr, "Cannot find prime bigger than %lu\n", n);
+ abort();
+ }
+#endif
+
+ return *low;
+}
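higher_prime_number() is a lower-bound binary search: it returns the smallest listed prime that is >= n. A standalone harness (illustration only, with a trimmed copy of the prime table) demonstrates the boundary cases:

	#include <stdio.h>

	static const unsigned long primes[] = { 7, 13, 31, 61, 127, 251, 509 };

	static unsigned long
	next_prime (unsigned long n)	/* same search as above */
	{
		const unsigned long *low = &primes[0];
		const unsigned long *high = &primes[sizeof (primes) / sizeof (primes[0])];

		while (low != high) {
			const unsigned long *mid = low + (high - low) / 2;
			if (n > *mid)
				low = mid + 1;
			else
				high = mid;
		}
		return *low;
	}

	int main (void)
	{
		/* prints: 7 7 13 509 */
		printf ("%lu %lu %lu %lu\n", next_prime (6), next_prime (7),
			next_prime (8), next_prime (300));
		return 0;
	}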
+
+struct funcdesc_ht
+{
+ /* Table itself */
+ void **entries;
+
+ /* Current size (in entries) of the hash table */
+ size_t size;
+
+ /* Current number of elements */
+ size_t n_elements;
+};
+
+static __always_inline struct funcdesc_ht *
+htab_create(void)
+{
+ struct funcdesc_ht *ht = _dl_malloc(sizeof(*ht));
+ size_t ent_size;
+
+ if (!ht)
+ return NULL;
+ ht->size = 3;
+ ent_size = sizeof(void *) * ht->size;
+ ht->entries = _dl_malloc(ent_size);
+ if (!ht->entries)
+ return NULL;
+
+ ht->n_elements = 0;
+ _dl_memset(ht->entries, 0, ent_size);
+
+ return ht;
+}
+
+/*
+ * This is only called from _dl_loadaddr_unmap, so it's safe to call
+ * _dl_free(). See the discussion below.
+ */
+static __always_inline void
+htab_delete(struct funcdesc_ht *htab)
+{
+	size_t i;
+
+	/* i is unsigned, so count down with a post-decrement test;
+	 * "i >= 0" would always be true for a size_t and run past
+	 * the start of the array. */
+	for (i = htab->size; i-- > 0; )
+ if (htab->entries[i])
+ _dl_free(htab->entries[i]);
+
+ _dl_free(htab->entries);
+ _dl_free(htab);
+}
+
+/*
+ * Similar to htab_find_slot, but without several unwanted side effects:
+ * - Does not call htab->eq_f when it finds an existing entry.
+ * - Does not change the count of elements/searches/collisions in the
+ * hash table.
+ * This function also assumes there are no deleted entries in the table.
+ * HASH is the hash value for the element to be inserted.
+ */
+static __always_inline void **
+find_empty_slot_for_expand(struct funcdesc_ht *htab, int hash)
+{
+ size_t size = htab->size;
+ unsigned int index = hash % size;
+ void **slot = htab->entries + index;
+ int hash2;
+
+ if (!*slot)
+ return slot;
+
+ hash2 = 1 + hash % (size - 2);
+ for (;;) {
+ index += hash2;
+ if (index >= size)
+ index -= size;
+
+ slot = htab->entries + index;
+ if (!*slot)
+ return slot;
+ }
+}
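The probe loop is double hashing: the table size is always one of the primes above, so any stride hash2 in [1, size - 2] is coprime to the size, the probe sequence visits every slot once before repeating, and the loop is guaranteed to reach an empty slot while the table has one. A tiny standalone check of that property (illustrative values, not part of the commit):

	#include <stdio.h>

	int main (void)
	{
		unsigned int size = 13, hash = 9;	/* size prime, as in the table */
		unsigned int hash2 = 1 + hash % (size - 2);
		unsigned int index = hash % size;
		unsigned int seen = 0, step;

		for (step = 0; step < size; step++) {
			seen |= 1u << index;
			index += hash2;
			if (index >= size)
				index -= size;
		}
		/* prints "visited 13 of 13" */
		printf ("visited %d of %u\n", __builtin_popcount (seen), size);
		return 0;
	}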
+
+/*
+ * The following function changes size of memory allocated for the
+ * entries and repeatedly inserts the table elements. The occupancy
+ * of the table after the call will be about 50%. Naturally the hash
+ * table must already exist. Remember also that the place of the
+ * table entries is changed. If memory allocation failures are allowed,
+ * this function will return zero, indicating that the table could not be
+ * expanded. If all goes well, it will return a non-zero value.
+ */
+static __always_inline int
+htab_expand(struct funcdesc_ht *htab, int (*hash_fn) (void *))
+{
+ void **oentries;
+ void **olimit;
+ void **p;
+ void **nentries;
+ size_t nsize;
+
+ oentries = htab->entries;
+ olimit = oentries + htab->size;
+
+	/*
+	 * Grow only when the table is getting full.  This trimmed-down
+	 * version never deletes entries, so it never shrinks; when no
+	 * growth is needed the entries are simply rehashed at the same size.
+	 */
+ if (htab->n_elements * 2 > htab->size)
+ nsize = higher_prime_number(htab->n_elements * 2);
+ else
+ nsize = htab->size;
+
+	nentries = _dl_malloc(sizeof(*nentries) * nsize);
+	if (nentries == NULL)
+		return 0;
+	/* Only zero the block once the allocation is known to have
+	 * succeeded; _dl_memset on a null pointer would crash. */
+	_dl_memset(nentries, 0, sizeof(*nentries) * nsize);
+ htab->entries = nentries;
+ htab->size = nsize;
+
+ p = oentries;
+ do {
+ if (*p)
+ *find_empty_slot_for_expand(htab, hash_fn(*p)) = *p;
+ p++;
+ } while (p < olimit);
+
+#if 0
+ /*
+ * We can't tell whether this was allocated by the _dl_malloc()
+ * built into ld.so or malloc() in the main executable or libc,
+ * and calling free() for something that wasn't malloc()ed could
+ * do Very Bad Things (TM). Take the conservative approach
+ * here, potentially wasting as much memory as actually used by
+ * the hash table, even if multiple growths occur. That's not
+ * so bad as to require some overengineered solution that would
+ * enable us to keep track of how it was allocated.
+ */
+ _dl_free(oentries);
+#endif
+ return 1;
+}
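Combined with the load check in htab_find_slot below (grow once the table is three-quarters full), the sizing can be traced by hand:

	size 3:  4th insert sees 3*3 <= 3*4, so htab_expand runs;
	         3*2 = 6 > 3 gives nsize = higher_prime_number(6) = 7.
	size 7:  7th insert sees 7*3 <= 6*4; 6*2 = 12 > 7 gives
	         nsize = higher_prime_number(12) = 13.

Each expansion therefore lands close to the 50% occupancy promised above.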
+
+/*
+ * This function searches for a hash table slot containing an entry
+ * equal to the given element. To delete an entry, call this with
+ * INSERT = 0, then call htab_clear_slot on the slot returned (possibly
+ * after doing some checks). To insert an entry, call this with
+ * INSERT = 1, then write the value you want into the returned slot.
+ * When inserting an entry, NULL may be returned if memory allocation
+ * fails.
+ */
+static __always_inline void **
+htab_find_slot(struct funcdesc_ht *htab, void *ptr, int insert,
+ int (*hash_fn)(void *), int (*eq_fn)(void *, void *))
+{
+ unsigned int index;
+ int hash, hash2;
+ size_t size;
+ void **entry;
+
+ if (htab->size * 3 <= htab->n_elements * 4 &&
+ htab_expand(htab, hash_fn) == 0)
+ return NULL;
+
+ hash = hash_fn(ptr);
+
+ size = htab->size;
+ index = hash % size;
+
+ entry = &htab->entries[index];
+ if (!*entry)
+ goto empty_entry;
+ else if (eq_fn(*entry, ptr))
+ return entry;
+
+ hash2 = 1 + hash % (size - 2);
+ for (;;) {
+ index += hash2;
+ if (index >= size)
+ index -= size;
+
+ entry = &htab->entries[index];
+ if (!*entry)
+ goto empty_entry;
+ else if (eq_fn(*entry, ptr))
+ return entry;
+ }
+
+ empty_entry:
+ if (!insert)
+ return NULL;
+
+ htab->n_elements++;
+ return entry;
+}
+
+#endif
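The intended calling pattern of this header is insert-or-find. A hypothetical caller (my_hash, my_eq and make_entry stand in for caller-supplied code; _dl_make_tlsdesc_dynamic in tlsdeschtab.h below follows the same shape):

	void **slot = htab_find_slot (ht, &key, 1, my_hash, my_eq);

	if (slot == NULL)
		return NULL;		/* the table could not be expanded */
	if (*slot)
		return *slot;		/* an equal entry already exists: reuse it */
	*slot = make_entry (&key);	/* claimed an empty slot: publish the element */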
diff --git a/ldso/include/ldsodefs.h b/ldso/include/ldsodefs.h
index 0feed8954..9ae645c60 100644
--- a/ldso/include/ldsodefs.h
+++ b/ldso/include/ldsodefs.h
@@ -62,13 +62,18 @@ extern void _dl_get_tls_static_info (size_t *sizep, size_t *alignp)
extern void _dl_allocate_static_tls (struct link_map *map)
internal_function attribute_hidden;
+extern int _dl_try_allocate_static_tls (struct link_map* map)
+ internal_function attribute_hidden;
/* Taken from glibc/elf/dl-reloc.c */
#define CHECK_STATIC_TLS(sym_map) \
do { \
- if (unlikely((sym_map)->l_tls_offset == NO_TLS_OFFSET)) \
+ if (__builtin_expect ((sym_map)->l_tls_offset == NO_TLS_OFFSET, 0)) \
_dl_allocate_static_tls (sym_map); \
} while (0)
+#define TRY_STATIC_TLS(sym_map) \
+ (__builtin_expect ((sym_map)->l_tls_offset != NO_TLS_OFFSET, 1) \
+ || _dl_try_allocate_static_tls (sym_map) == 0)
/* These are internal entry points to the two halves of _dl_allocate_tls,
only used within rtld.c itself at startup time. */
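TRY_STATIC_TLS differs from CHECK_STATIC_TLS in reporting failure instead of treating an exhausted static TLS block as fatal. A sketch of the relocation-time pattern this enables, with illustrative variable names rather than uClibc's actual relocation code:

	if (TRY_STATIC_TLS (sym_map)) {
		/* A static TLS slot is reserved; the symbol resolves to a
		   constant offset known at relocation time. */
		value = sym->st_value + sym_map->l_tls_offset;
	} else {
		/* No static room left; fall back to a lazily allocated
		   dynamic descriptor (see tlsdeschtab.h below). */
		value = (ElfW(Addr)) _dl_make_tlsdesc_dynamic (sym_map, sym->st_value);
	}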
diff --git a/ldso/include/tlsdeschtab.h b/ldso/include/tlsdeschtab.h
new file mode 100644
index 000000000..86baea148
--- /dev/null
+++ b/ldso/include/tlsdeschtab.h
@@ -0,0 +1,119 @@
+/* Hash table for TLS descriptors.
+ Copyright (C) 2005-2013 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Alexandre Oliva <aoliva@redhat.com>
+
+ uClibc port by Baruch Siach <baruch@tkos.co.il>
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef TLSDESCHTAB_H
+# define TLSDESCHTAB_H 1
+
+# ifdef SHARED
+
+# include <inline-hashtab.h>
+
+inline static int
+hash_tlsdesc (void *p)
+{
+ struct tlsdesc_dynamic_arg *td = p;
+
+ /* We know all entries are for the same module, so ti_offset is the
+ only distinguishing entry. */
+ return td->tlsinfo.ti_offset;
+}
+
+inline static int
+eq_tlsdesc (void *p, void *q)
+{
+ struct tlsdesc_dynamic_arg *tdp = p, *tdq = q;
+
+ return tdp->tlsinfo.ti_offset == tdq->tlsinfo.ti_offset;
+}
+
+inline static int
+map_generation (struct link_map *map)
+{
+ size_t idx = map->l_tls_modid;
+ struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);
+
+ /* Find the place in the dtv slotinfo list. */
+ do
+ {
+ /* Does it fit in the array of this list element? */
+ if (idx < listp->len)
+ {
+ /* We should never get here for a module in static TLS, so
+ we can assume that, if the generation count is zero, we
+ still haven't determined the generation count for this
+ module. */
+ if (listp->slotinfo[idx].gen)
+ return listp->slotinfo[idx].gen;
+ else
+ break;
+ }
+ idx -= listp->len;
+ listp = listp->next;
+ }
+ while (listp != NULL);
+
+ /* If we get to this point, the module still hasn't been assigned an
+ entry in the dtv slotinfo data structures, and it will when we're
+ done with relocations. At that point, the module will get a
+ generation number that is one past the current generation, so
+ return exactly that. */
+ return GL(dl_tls_generation) + 1;
+}
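map_generation() walks the chunked slotinfo list that the TLS code keeps per module index. For orientation, abbreviated glibc-style definitions of the structures involved (the uClibc equivalents live in its TLS support headers):

	struct dtv_slotinfo
	{
	  size_t gen;			/* generation when this modid was (re)assigned */
	  struct link_map *map;		/* module that owns the modid */
	};

	struct dtv_slotinfo_list
	{
	  size_t len;			/* number of slots in this chunk */
	  struct dtv_slotinfo_list *next;
	  struct dtv_slotinfo slotinfo[];
	};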
+
+void *
+internal_function
+_dl_make_tlsdesc_dynamic (struct link_map *map, size_t ti_offset)
+{
+ struct funcdesc_ht *ht;
+ void **entry;
+ struct tlsdesc_dynamic_arg *td, test;
+
+ ht = map->l_tlsdesc_table;
+ if (! ht)
+ {
+ ht = htab_create ();
+ if (! ht)
+ return 0;
+ map->l_tlsdesc_table = ht;
+ }
+
+ test.tlsinfo.ti_module = map->l_tls_modid;
+ test.tlsinfo.ti_offset = ti_offset;
+  entry = htab_find_slot (ht, &test, 1, hash_tlsdesc, eq_tlsdesc);
+  if (! entry)
+    return 0;	/* the hash table could not be expanded */
+
+ if (*entry)
+ {
+ td = *entry;
+ return td;
+ }
+
+  *entry = td = _dl_malloc (sizeof (struct tlsdesc_dynamic_arg));
+  if (! td)
+    return 0;	/* out of memory; the claimed slot stays empty */
+ /* This may be higher than the map's generation, but it doesn't
+ matter much. Worst case, we'll have one extra DTV update per
+ thread. */
+ td->gen_count = map_generation (map);
+ td->tlsinfo = test.tlsinfo;
+
+ return td;
+}
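The value cached per (module, ti_offset) pair is the argument block later handed to the dynamic TLS resolver stub. Arch headers define it roughly as follows (abbreviated, glibc-style; shown here only for context):

	struct tlsdesc_dynamic_arg
	{
	  tls_index tlsinfo;	/* ti_module / ti_offset passed to __tls_get_addr */
	  size_t gen_count;	/* DTV generation at which tlsinfo became valid */
	};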
+
+# endif /* SHARED */
+
+#endif