-rw-r--r--  ldso/include/dl-defs.h |  51
-rw-r--r--  ldso/ldso/dl-elf.c     | 218
-rw-r--r--  ldso/libdl/libdl.c     |   4
3 files changed, 222 insertions, 51 deletions
diff --git a/ldso/include/dl-defs.h b/ldso/include/dl-defs.h
index 3de7f5230..2c4402929 100644
--- a/ldso/include/dl-defs.h
+++ b/ldso/include/dl-defs.h
@@ -110,7 +110,7 @@ typedef struct {
#endif
/* Initialize a LOADADDR representing the loader itself. It's only
- * called from DL_BOOT, so additional arguments passed to it may be
+ * called from DL_START, so additional arguments passed to it may be
* referenced.
*/
#ifndef DL_INIT_LOADADDR_BOOT
@@ -144,6 +144,12 @@ typedef struct {
((LOADADDR) = (DL_LOADADDR_TYPE)(BASEADDR))
#endif
+/* Update LOADADDR with information about PHDR, just mapped to the
+ given ADDR. */
+#ifndef DL_INIT_LOADADDR_HDR
+# define DL_INIT_LOADADDR_HDR(LOADADDR, ADDR, PHDR) /* Do nothing. */
+#endif
+
/* Convert a DL_LOADADDR_TYPE to an identifying pointer. Used mostly
* for debugging.
*/
@@ -166,6 +172,13 @@ typedef struct {
&& (!(TFROM) || (TFROM)->loadaddr < (TPNT)->loadaddr))
#endif
+/* This is called from dladdr() to give targets that use function descriptors
+ * a chance to map a function descriptor's address to the function's entry
+ * point before trying to find in which library it's defined. */
+#ifndef DL_LOOKUP_ADDRESS
+#define DL_LOOKUP_ADDRESS(ADDRESS) (ADDRESS)
+#endif
+
/* Use this macro to convert a pointer to a function's entry point to
* a pointer to function. The pointer is assumed to have already been
* relocated. LOADADDR is passed because it may contain additional
@@ -202,4 +215,40 @@ typedef struct {
# define DL_FIND_HASH_VALUE(TPNT, TYPE, SYM) (DL_RELOC_ADDR ((SYM)->st_value, (TPNT)->loadaddr))
#endif
+/* Unmap all previously-mapped segments accumulated in LOADADDR.
+ Generally used when an error occurs during loading. */
+#ifndef DL_LOADADDR_UNMAP
+# define DL_LOADADDR_UNMAP(LOADADDR, LEN) \
+ _dl_munmap((char *) (LOADADDR), (LEN))
+#endif
+
+/* Similar to DL_LOADADDR_UNMAP, but used for libraries that have been
+ dlopen()ed successfully, when they're dlclose()d. */
+#ifndef DL_LIB_UNMAP
+# define DL_LIB_UNMAP(LIB, LEN) (DL_LOADADDR_UNMAP ((LIB)->loadaddr, (LEN)))
+#endif
+
+/* Define this to verify that a library named LIBNAME, whose ELF
+ headers are pointed to by EPNT, is suitable for dynamic linking.
+ If it is not, print an error message (optional) and return NULL.
+ If the library can have its segments relocated independently,
+ arrange for PICLIB to be set to 2. If all segments have to be
+ relocated by the same amount, set it to 1. If it has to be loaded
+ at physical addresses as specified in the program headers, set it
+ to 0. A reasonable (?) guess for PICLIB will already be in place,
+ so it is safe to do nothing here. */
+#ifndef DL_CHECK_LIB_TYPE
+# define DL_CHECK_LIB_TYPE(EPNT, PICLIB, PROGNAME, LIBNAME) (void)0
+#endif
+
+/* Define this if you have a special segment. */
+#ifndef DL_IS_SPECIAL_SEGMENT
+# define DL_IS_SPECIAL_SEGMENT(EPNT, PPNT) 0
+#endif
+
+/* Define this if you want to use a special method to map the segment. */
+#ifndef DL_MAP_SEGMENT
+# define DL_MAP_SEGMENT(EPNT, PPNT, INFILE, FLAGS) 0
+#endif
+
#endif /* _LD_DEFS_H */
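All of the new hooks are wrapped in #ifndef so an architecture can pre-define them before dl-defs.h is included; the defaults above keep the existing flat, single-mapping behaviour. As a purely hypothetical sketch of what a port with independently relocatable segments (piclib == 2) might plug in, something along these lines could record each segment as it is mapped and tear all of them down on error. Every name here (struct example_loadmap, EXAMPLE_MAX_SEGS, the field names) is invented for illustration and taken from no real port:

/* Hypothetical only: DL_LOADADDR_TYPE would be defined as this struct. */
#define EXAMPLE_MAX_SEGS 8	/* invented cap; libraries rarely have many PT_LOADs */

struct example_loadmap {
	int nsegs;
	struct {
		ElfW(Addr) p_vaddr;	/* link-time address of the segment */
		ElfW(Word) p_memsz;	/* size of the segment in memory */
		char *addr;		/* where it was actually mapped */
	} segs[EXAMPLE_MAX_SEGS];
};

/* Record where one program header just landed. */
#define DL_INIT_LOADADDR_HDR(LOADADDR, ADDR, PHDR) \
	do { \
		int i_ = (LOADADDR).nsegs++; \
		(LOADADDR).segs[i_].p_vaddr = (PHDR)->p_vaddr; \
		(LOADADDR).segs[i_].p_memsz = (PHDR)->p_memsz; \
		(LOADADDR).segs[i_].addr = (ADDR); \
	} while (0)

/* Undo every mapping recorded so far; LEN is ignored because the
 * segments are not contiguous.  A real port would round the addresses
 * and lengths to page boundaries before unmapping. */
#define DL_LOADADDR_UNMAP(LOADADDR, LEN) \
	do { \
		int i_; \
		for (i_ = 0; i_ < (LOADADDR).nsegs; i_++) \
			_dl_munmap((LOADADDR).segs[i_].addr, \
				   (LOADADDR).segs[i_].p_memsz); \
	} while (0)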
diff --git a/ldso/ldso/dl-elf.c b/ldso/ldso/dl-elf.c
index a6ed8f6b5..071a6e4ef 100644
--- a/ldso/ldso/dl-elf.c
+++ b/ldso/ldso/dl-elf.c
@@ -354,6 +354,7 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
DL_LOADADDR_TYPE lib_loadaddr;
DL_INIT_LOADADDR_EXTRA_DECLS
+ libaddr = 0;
infile = _dl_open(libname, O_RDONLY, 0);
if (infile < 0) {
_dl_internal_error_number = LD_ERROR_NOFILE;
@@ -449,6 +450,8 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
ppnt++;
}
+ DL_CHECK_LIB_TYPE (epnt, piclib, _dl_progname, libname);
+
maxvma = (maxvma + ADDR_ALIGN) & ~ADDR_ALIGN;
minvma = minvma & ~0xffffU;
@@ -456,17 +459,19 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
if (!piclib)
flags |= MAP_FIXED;
- status = (char *) _dl_mmap((char *) (piclib ? 0 : minvma),
- maxvma - minvma, PROT_NONE, flags | MAP_ANONYMOUS, -1, 0);
- if (_dl_mmap_check_error(status)) {
- _dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname);
- _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
- _dl_close(infile);
- _dl_munmap(header, _dl_pagesize);
- return NULL;
+ if (piclib == 0 || piclib == 1) {
+ status = (char *) _dl_mmap((char *) (piclib ? 0 : minvma),
+ maxvma - minvma, PROT_NONE, flags | MAP_ANONYMOUS, -1, 0);
+ if (_dl_mmap_check_error(status)) {
+ _dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname);
+ _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
+ _dl_close(infile);
+ _dl_munmap(header, _dl_pagesize);
+ return NULL;
+ }
+ libaddr = (unsigned long) status;
+ flags |= MAP_FIXED;
}
- libaddr = (unsigned long) status;
- flags |= MAP_FIXED;
/* Get the memory to store the library */
ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
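With this change the up-front PROT_NONE reservation is made only for piclib 0 and 1, where the library still has to occupy one contiguous virtual address range that later MAP_FIXED calls carve up; for piclib == 2 each segment is placed independently, so there is nothing to reserve and libaddr simply stays 0. A minimal standalone sketch of the reservation idea, using plain mmap(2) instead of the _dl_* wrappers, with error handling and the !piclib fixed-address case left out, and assuming the library links at vaddr 0 as ordinary shared objects do:

#include <link.h>	/* ElfW() */
#include <stdint.h>
#include <sys/mman.h>

/* Reserve the library's whole [minvma, maxvma) span without access
 * rights; nothing else can then land inside it by accident. */
static char *reserve_library_span(uintptr_t minvma, uintptr_t maxvma)
{
	char *base = mmap(NULL, maxvma - minvma, PROT_NONE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	return base == MAP_FAILED ? NULL : base;
}

/* Later, each PT_LOAD is mapped over part of that reservation with
 * MAP_FIXED, which is safe because the target range is known to lie
 * inside memory we already own.  Real code derives the PROT_* bits
 * from ph->p_flags (LXFLAGS above) instead of hard-coding them. */
static char *place_segment(char *base, int fd, const ElfW(Phdr) *ph,
			   size_t pagesize)
{
	char *want = base + (ph->p_vaddr & ~(pagesize - 1));
	size_t len = (ph->p_vaddr & (pagesize - 1)) + ph->p_filesz;
	char *got = mmap(want, len, PROT_READ | PROT_EXEC,
			 MAP_PRIVATE | MAP_FIXED, fd,
			 ph->p_offset & ~(pagesize - 1));
	return got == MAP_FAILED ? NULL : got;
}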
@@ -474,11 +479,24 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
DL_INIT_LOADADDR(lib_loadaddr, libaddr, ppnt, epnt->e_phnum);
for (i = 0; i < epnt->e_phnum; i++) {
+ if (DL_IS_SPECIAL_SEGMENT (epnt, ppnt)) {
+ char *addr;
+
+ addr = DL_MAP_SEGMENT (epnt, ppnt, infile, flags);
+ if (addr == NULL)
+ goto cant_map;
+
+ DL_INIT_LOADADDR_HDR (lib_loadaddr, addr, ppnt);
+ ppnt++;
+ continue;
+ }
if (ppnt->p_type == PT_GNU_RELRO) {
relro_addr = ppnt->p_vaddr;
relro_size = ppnt->p_memsz;
}
if (ppnt->p_type == PT_LOAD) {
+ char *tryaddr;
+ ssize_t size;
/* See if this is a PIC library. */
if (i == 0 && ppnt->p_vaddr > 0x1000000) {
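The DL_IS_SPECIAL_SEGMENT / DL_MAP_SEGMENT pair added above lets a port claim a program header and map it itself before the generic PT_LOAD handling runs; on failure the mapper must return NULL so the loop takes the cant_map path below. A hypothetical override, just to show the shape of the contract (the predicate, the helper and its behaviour are invented, not taken from any real port):

/* Invented example: treat read-only executable segments as "special"
 * and hand them to a port-specific text allocator instead of mmap. */
#define DL_IS_SPECIAL_SEGMENT(EPNT, PPNT) \
	((PPNT)->p_type == PT_LOAD \
	 && ((PPNT)->p_flags & (PF_X | PF_W)) == PF_X)

/* Must return the segment's load address, or NULL on failure. */
#define DL_MAP_SEGMENT(EPNT, PPNT, INFILE, FLAGS) \
	_example_map_text_segment((PPNT), (INFILE))

extern char *_example_map_text_segment(ElfW(Phdr) *phdr, int fd); /* invented */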
@@ -489,53 +507,155 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
if (ppnt->p_flags & PF_W) {
unsigned long map_size;
char *cpnt;
-
- status = (char *) _dl_mmap((char *) ((piclib ? libaddr : 0) +
- (ppnt->p_vaddr & PAGE_ALIGN)), (ppnt->p_vaddr & ADDR_ALIGN)
- + ppnt->p_filesz, LXFLAGS(ppnt->p_flags), flags, infile,
- ppnt->p_offset & OFFS_ALIGN);
-
- if (_dl_mmap_check_error(status)) {
+ char *piclib2map = 0;
+
+ if (piclib == 2 &&
+ /* We might be able to avoid this
+ call if memsz doesn't require
+ an additional page, but this
+ would require mmap to always
+ return page-aligned addresses
+ and a whole number of pages
+ allocated. Unfortunately, on
+ uClinux mmap may return misaligned
+ addresses and may allocate
+ partial pages, so we may end up
+ doing unnecessary mmap calls.
+
+ This is what we could do if we
+ knew mmap would always return
+ aligned pages:
+
+ ((ppnt->p_vaddr + ppnt->p_filesz
+ + ADDR_ALIGN)
+ & PAGE_ALIGN)
+ < ppnt->p_vaddr + ppnt->p_memsz)
+
+ Instead, we have to do this: */
+ ppnt->p_filesz < ppnt->p_memsz)
+ {
+ piclib2map = (char *)
+ _dl_mmap(0, (ppnt->p_vaddr & ADDR_ALIGN)
+ + ppnt->p_memsz,
+ LXFLAGS(ppnt->p_flags),
+ flags | MAP_ANONYMOUS, -1, 0);
+ if (_dl_mmap_check_error(piclib2map))
+ goto cant_map;
+ DL_INIT_LOADADDR_HDR
+ (lib_loadaddr, piclib2map
+ + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
+ }
+
+ tryaddr = piclib == 2 ? piclib2map
+ : ((char*) (piclib ? libaddr : 0) +
+ (ppnt->p_vaddr & PAGE_ALIGN));
+
+ size = (ppnt->p_vaddr & ADDR_ALIGN)
+ + ppnt->p_filesz;
+
+ /* For !MMU, mmap to a fixed address will fail.
+ So instead of desperately calling mmap and failing,
+ we set status to MAP_FAILED to save a call
+ to mmap(). */
+#ifndef __ARCH_USE_MMU__
+ if (piclib2map == 0)
+#endif
+ status = (char *) _dl_mmap
+ (tryaddr, size, LXFLAGS(ppnt->p_flags),
+ flags | (piclib2map ? MAP_FIXED : 0),
+ infile, ppnt->p_offset & OFFS_ALIGN);
+#ifndef __ARCH_USE_MMU__
+ else
+ status = MAP_FAILED;
+#endif
+#ifdef _DL_PREAD
+ if (_dl_mmap_check_error(status) && piclib2map
+ && (_DL_PREAD (infile, tryaddr, size,
+ ppnt->p_offset & OFFS_ALIGN)
+ == size))
+ status = tryaddr;
+#endif
+ if (_dl_mmap_check_error(status)
+ || (tryaddr && tryaddr != status)) {
+ cant_map:
_dl_dprintf(2, "%s:%i: can't map '%s'\n",
_dl_progname, __LINE__, libname);
_dl_internal_error_number = LD_ERROR_MMAP_FAILED;
- _dl_munmap((char *) libaddr, maxvma - minvma);
+ DL_LOADADDR_UNMAP (lib_loadaddr, maxvma - minvma);
_dl_close(infile);
_dl_munmap(header, _dl_pagesize);
return NULL;
}
- /* Pad the last page with zeroes. */
- cpnt = (char *) (status + (ppnt->p_vaddr & ADDR_ALIGN) +
- ppnt->p_filesz);
- while (((unsigned long) cpnt) & ADDR_ALIGN)
- *cpnt++ = 0;
-
- /* I am not quite sure if this is completely
- * correct to do or not, but the basic way that
- * we handle bss segments is that we mmap
- * /dev/zero if there are any pages left over
- * that are not mapped as part of the file */
-
- map_size = (ppnt->p_vaddr + ppnt->p_filesz + ADDR_ALIGN) & PAGE_ALIGN;
-
- if (map_size < ppnt->p_vaddr + ppnt->p_memsz)
- status = (char *) _dl_mmap((char *) map_size +
- (piclib ? libaddr : 0),
- ppnt->p_vaddr + ppnt->p_memsz - map_size,
- LXFLAGS(ppnt->p_flags), flags | MAP_ANONYMOUS, -1, 0);
- } else
- status = (char *) _dl_mmap((char *) (ppnt->p_vaddr & PAGE_ALIGN)
- + (piclib ? libaddr : 0), (ppnt->p_vaddr & ADDR_ALIGN) +
- ppnt->p_filesz, LXFLAGS(ppnt->p_flags), flags,
- infile, ppnt->p_offset & OFFS_ALIGN);
- if (_dl_mmap_check_error(status)) {
- _dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname);
- _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
- _dl_munmap((char *) libaddr, maxvma - minvma);
- _dl_close(infile);
- _dl_munmap(header, _dl_pagesize);
- return NULL;
+ if (! piclib2map)
+ DL_INIT_LOADADDR_HDR
+ (lib_loadaddr, status
+ + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
+
+ /* Now we want to allocate and
+ zero-out any data from the end of
+ the region we mapped in from the
+ file (filesz) to the end of the
+ loadable segment (memsz). We may
+ need additional pages for memsz,
+ that we map in below, and we can
+ count on the kernel to zero them
+ out, but we have to zero out stuff
+ in the last page that we mapped in
+ from the file. However, we can't
+ assume we actually obtained
+ full pages from the kernel, since
+ we didn't ask for them, and uClibc
+ may not give us full pages for
+ small allocations. So only zero
+ out up to memsz or the end of the
+ page, whichever comes first. */
+
+ /* CPNT is the beginning of the memsz
+ portion not backed by filesz. */
+ cpnt = (char *) (status + size);
+
+ /* MAP_SIZE is the address of the
+ beginning of the next page. */
+ map_size = (ppnt->p_vaddr + ppnt->p_filesz
+ + ADDR_ALIGN) & PAGE_ALIGN;
+
+#ifndef MIN
+# define MIN(a,b) ((a) < (b) ? (a) : (b))
+#endif
+ _dl_memset (cpnt, 0,
+ MIN (map_size
+ - (ppnt->p_vaddr
+ + ppnt->p_filesz),
+ ppnt->p_memsz
+ - ppnt->p_filesz));
+
+ if (map_size < ppnt->p_vaddr + ppnt->p_memsz
+ && !piclib2map) {
+ tryaddr = map_size + (char*)(piclib ? libaddr : 0);
+ status = (char *) _dl_mmap(tryaddr,
+ ppnt->p_vaddr + ppnt->p_memsz - map_size,
+ LXFLAGS(ppnt->p_flags), flags | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+ if (_dl_mmap_check_error(status)
+ || tryaddr != status)
+ goto cant_map;
+ }
+ } else {
+ tryaddr = (piclib == 2 ? 0
+ : (char *) (ppnt->p_vaddr & PAGE_ALIGN)
+ + (piclib ? libaddr : 0));
+ size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz;
+ status = (char *) _dl_mmap
+ (tryaddr, size, LXFLAGS(ppnt->p_flags),
+ flags | (piclib == 2 ? MAP_EXECUTABLE
+ | MAP_DENYWRITE : 0),
+ infile, ppnt->p_offset & OFFS_ALIGN);
+ if (_dl_mmap_check_error(status)
+ || (tryaddr && tryaddr != status))
+ goto cant_map;
+ DL_INIT_LOADADDR_HDR
+ (lib_loadaddr, status
+ + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
}
/* if (libaddr == 0 && piclib) {
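Taken together, the new writable-segment path for piclib == 2 does three things: allocate an anonymous, zero-filled region large enough for p_memsz, map (or, where _DL_PREAD is available, read) the p_filesz file-backed portion over its start, and zero the slack between p_filesz and the end of the last file-backed page. A condensed standalone sketch of that sequence with plain POSIX calls, for illustration only; the real code above keeps the _dl_* wrappers, LXFLAGS, the DL_INIT_LOADADDR_HDR bookkeeping and the no-MMU special cases:

#include <link.h>	/* ElfW() */
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static void *map_writable_segment(int fd, const ElfW(Phdr) *ph, size_t pagesize)
{
	size_t misalign = ph->p_vaddr & (pagesize - 1);
	size_t filespan = misalign + ph->p_filesz;
	size_t memspan  = misalign + ph->p_memsz;

	/* 1. Anonymous mapping big enough for the whole segment; the
	 *    kernel zero-fills it, which covers the bss pages. */
	char *base = mmap(NULL, memspan, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED)
		return NULL;

	/* 2. Map the file-backed part over the front of it.  On a
	 *    no-MMU target this MAP_FIXED mapping may fail, in which
	 *    case the data can be read into place instead. */
	if (mmap(base, filespan, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_FIXED, fd,
		 ph->p_offset & ~(pagesize - 1)) == MAP_FAILED) {
		if (pread(fd, base, filespan,
			  ph->p_offset & ~(pagesize - 1)) != (ssize_t) filespan) {
			munmap(base, memspan);
			return NULL;
		}
	}

	/* 3. Zero whatever follows p_filesz in the last file-backed
	 *    page: the file mapping may have brought in stale bytes,
	 *    but never zero past the end of the segment. */
	size_t tail = (filespan + pagesize - 1) & ~(pagesize - 1);
	memset(base + filespan, 0,
	       (tail < memspan ? tail : memspan) - filespan);

	return base + misalign;	/* the segment's actual load address */
}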
diff --git a/ldso/libdl/libdl.c b/ldso/libdl/libdl.c
index 4e909a16f..d21f8a6fd 100644
--- a/ldso/libdl/libdl.c
+++ b/ldso/libdl/libdl.c
@@ -587,7 +587,7 @@ static int do_dlclose(void *vhandle, int need_fini)
if (end < ppnt->p_vaddr + ppnt->p_memsz)
end = ppnt->p_vaddr + ppnt->p_memsz;
}
- _dl_munmap((void*)tpnt->loadaddr, end);
+ DL_LIB_UNMAP (tpnt, end);
/* Free elements in RTLD_LOCAL scope list */
for (runp = tpnt->rtld_local; runp; runp = tmp) {
tmp = runp->next;
@@ -713,6 +713,8 @@ int dladdr(const void *__address, Dl_info * __info)
_dl_if_debug_print("__address: %p __info: %p\n", __address, __info);
+ __address = DL_LOOKUP_ADDRESS (__address);
+
for (rpnt = _dl_loaded_modules; rpnt; rpnt = rpnt->next) {
struct elf_resolve *tpnt;
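On targets that call functions through descriptors, the pointer handed to dladdr() is the descriptor's address rather than the code address, so the module search above would never find it; DL_LOOKUP_ADDRESS gives the port a chance to translate it first. A hypothetical definition, purely to show the intent (the descriptor layout and the _example_is_funcdesc() helper are invented):

/* Invented descriptor layout: code entry point plus the pointer the
 * callee expects in its GOT/FDPIC register. */
struct example_funcdesc {
	void *entry_point;
	void *got_value;
};

int _example_is_funcdesc(const void *addr); /* invented helper */

/* Translate a descriptor address to the code address; leave anything
 * that is not one of the loader's descriptors untouched. */
#define DL_LOOKUP_ADDRESS(ADDRESS) \
	(_example_is_funcdesc(ADDRESS) \
	 ? ((const struct example_funcdesc *) (ADDRESS))->entry_point \
	 : (ADDRESS))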