summaryrefslogtreecommitdiff
path: root/ldso/ldso/dl-elf.c
diff options
context:
space:
mode:
authorBernd Schmidt <bernds_cb1@t-online.de>2007-12-04 02:14:39 +0000
committerBernd Schmidt <bernds_cb1@t-online.de>2007-12-04 02:14:39 +0000
commit7129c310626a276f171da52fb0cbddd5c16b5a9b (patch)
treeaa469c03cf28f0b33f0662f93c9da0fdd9dc38ff /ldso/ldso/dl-elf.c
parentad989e28a40d78faa9ac6916355e8f1482900a35 (diff)
Blackfin FD-PIC patch 6/6.
These are mostly the changes necessary to deal with loading the libraries into memory. A couple new target macros are defined for this purpose, and the code in dl-elf.c is modified to deal with nommu systems.
Diffstat (limited to 'ldso/ldso/dl-elf.c')
-rw-r--r--ldso/ldso/dl-elf.c218
1 file changed, 169 insertions, 49 deletions
diff --git a/ldso/ldso/dl-elf.c b/ldso/ldso/dl-elf.c
index a6ed8f6b5..071a6e4ef 100644
--- a/ldso/ldso/dl-elf.c
+++ b/ldso/ldso/dl-elf.c
@@ -354,6 +354,7 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
DL_LOADADDR_TYPE lib_loadaddr;
DL_INIT_LOADADDR_EXTRA_DECLS
+ libaddr = 0;
infile = _dl_open(libname, O_RDONLY, 0);
if (infile < 0) {
_dl_internal_error_number = LD_ERROR_NOFILE;
@@ -449,6 +450,8 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
ppnt++;
}
+ DL_CHECK_LIB_TYPE (epnt, piclib, _dl_progname, libname);
+
maxvma = (maxvma + ADDR_ALIGN) & ~ADDR_ALIGN;
minvma = minvma & ~0xffffU;
@@ -456,17 +459,19 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
if (!piclib)
flags |= MAP_FIXED;
- status = (char *) _dl_mmap((char *) (piclib ? 0 : minvma),
- maxvma - minvma, PROT_NONE, flags | MAP_ANONYMOUS, -1, 0);
- if (_dl_mmap_check_error(status)) {
- _dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname);
- _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
- _dl_close(infile);
- _dl_munmap(header, _dl_pagesize);
- return NULL;
+ if (piclib == 0 || piclib == 1) {
+ status = (char *) _dl_mmap((char *) (piclib ? 0 : minvma),
+ maxvma - minvma, PROT_NONE, flags | MAP_ANONYMOUS, -1, 0);
+ if (_dl_mmap_check_error(status)) {
+ _dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname);
+ _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
+ _dl_close(infile);
+ _dl_munmap(header, _dl_pagesize);
+ return NULL;
+ }
+ libaddr = (unsigned long) status;
+ flags |= MAP_FIXED;
}
- libaddr = (unsigned long) status;
- flags |= MAP_FIXED;
/* Get the memory to store the library */
ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
@@ -474,11 +479,24 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
DL_INIT_LOADADDR(lib_loadaddr, libaddr, ppnt, epnt->e_phnum);
for (i = 0; i < epnt->e_phnum; i++) {
+ if (DL_IS_SPECIAL_SEGMENT (epnt, ppnt)) {
+ char *addr;
+
+ addr = DL_MAP_SEGMENT (epnt, ppnt, infile, flags);
+ if (addr == NULL)
+ goto cant_map;
+
+ DL_INIT_LOADADDR_HDR (lib_loadaddr, addr, ppnt);
+ ppnt++;
+ continue;
+ }
if (ppnt->p_type == PT_GNU_RELRO) {
relro_addr = ppnt->p_vaddr;
relro_size = ppnt->p_memsz;
}
if (ppnt->p_type == PT_LOAD) {
+ char *tryaddr;
+ ssize_t size;
/* See if this is a PIC library. */
if (i == 0 && ppnt->p_vaddr > 0x1000000) {
@@ -489,53 +507,155 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
if (ppnt->p_flags & PF_W) {
unsigned long map_size;
char *cpnt;
-
- status = (char *) _dl_mmap((char *) ((piclib ? libaddr : 0) +
- (ppnt->p_vaddr & PAGE_ALIGN)), (ppnt->p_vaddr & ADDR_ALIGN)
- + ppnt->p_filesz, LXFLAGS(ppnt->p_flags), flags, infile,
- ppnt->p_offset & OFFS_ALIGN);
-
- if (_dl_mmap_check_error(status)) {
+ char *piclib2map = 0;
+
+ if (piclib == 2 &&
+ /* We might be able to avoid this
+ call if memsz doesn't require
+ an additional page, but this
+ would require mmap to always
+ return page-aligned addresses
+ and a whole number of pages
+ allocated. Unfortunately, on
+ uClinux mmap may return misaligned
+ addresses and may allocate
+ partial pages, so we may end up
+ doing unnecessary mmap calls.
+
+ This is what we could do if we
+ knew mmap would always return
+ aligned pages:
+
+ ((ppnt->p_vaddr + ppnt->p_filesz
+ + ADDR_ALIGN)
+ & PAGE_ALIGN)
+ < ppnt->p_vaddr + ppnt->p_memsz)
+
+ Instead, we have to do this: */
+ ppnt->p_filesz < ppnt->p_memsz)
+ {
+ piclib2map = (char *)
+ _dl_mmap(0, (ppnt->p_vaddr & ADDR_ALIGN)
+ + ppnt->p_memsz,
+ LXFLAGS(ppnt->p_flags),
+ flags | MAP_ANONYMOUS, -1, 0);
+ if (_dl_mmap_check_error(piclib2map))
+ goto cant_map;
+ DL_INIT_LOADADDR_HDR
+ (lib_loadaddr, piclib2map
+ + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
+ }
+
+ tryaddr = piclib == 2 ? piclib2map
+ : ((char*) (piclib ? libaddr : 0) +
+ (ppnt->p_vaddr & PAGE_ALIGN));
+
+ size = (ppnt->p_vaddr & ADDR_ALIGN)
+ + ppnt->p_filesz;
+
+ /* For !MMU, mmap to fixed address will fail.
+ So instead of desperately calling mmap and failing,
+ we set status to MAP_FAILED to save a call
+ to mmap (). */
+#ifndef __ARCH_USE_MMU__
+ if (piclib2map == 0)
+#endif
+ status = (char *) _dl_mmap
+ (tryaddr, size, LXFLAGS(ppnt->p_flags),
+ flags | (piclib2map ? MAP_FIXED : 0),
+ infile, ppnt->p_offset & OFFS_ALIGN);
+#ifndef __ARCH_USE_MMU__
+ else
+ status = MAP_FAILED;
+#endif
+#ifdef _DL_PREAD
+ if (_dl_mmap_check_error(status) && piclib2map
+ && (_DL_PREAD (infile, tryaddr, size,
+ ppnt->p_offset & OFFS_ALIGN)
+ == size))
+ status = tryaddr;
+#endif
+ if (_dl_mmap_check_error(status)
+ || (tryaddr && tryaddr != status)) {
+ cant_map:
_dl_dprintf(2, "%s:%i: can't map '%s'\n",
_dl_progname, __LINE__, libname);
_dl_internal_error_number = LD_ERROR_MMAP_FAILED;
- _dl_munmap((char *) libaddr, maxvma - minvma);
+ DL_LOADADDR_UNMAP (lib_loadaddr, maxvma - minvma);
_dl_close(infile);
_dl_munmap(header, _dl_pagesize);
return NULL;
}
- /* Pad the last page with zeroes. */
- cpnt = (char *) (status + (ppnt->p_vaddr & ADDR_ALIGN) +
- ppnt->p_filesz);
- while (((unsigned long) cpnt) & ADDR_ALIGN)
- *cpnt++ = 0;
-
- /* I am not quite sure if this is completely
- * correct to do or not, but the basic way that
- * we handle bss segments is that we mmap
- * /dev/zero if there are any pages left over
- * that are not mapped as part of the file */
-
- map_size = (ppnt->p_vaddr + ppnt->p_filesz + ADDR_ALIGN) & PAGE_ALIGN;
-
- if (map_size < ppnt->p_vaddr + ppnt->p_memsz)
- status = (char *) _dl_mmap((char *) map_size +
- (piclib ? libaddr : 0),
- ppnt->p_vaddr + ppnt->p_memsz - map_size,
- LXFLAGS(ppnt->p_flags), flags | MAP_ANONYMOUS, -1, 0);
- } else
- status = (char *) _dl_mmap((char *) (ppnt->p_vaddr & PAGE_ALIGN)
- + (piclib ? libaddr : 0), (ppnt->p_vaddr & ADDR_ALIGN) +
- ppnt->p_filesz, LXFLAGS(ppnt->p_flags), flags,
- infile, ppnt->p_offset & OFFS_ALIGN);
- if (_dl_mmap_check_error(status)) {
- _dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname);
- _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
- _dl_munmap((char *) libaddr, maxvma - minvma);
- _dl_close(infile);
- _dl_munmap(header, _dl_pagesize);
- return NULL;
+ if (! piclib2map)
+ DL_INIT_LOADADDR_HDR
+ (lib_loadaddr, status
+ + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
+
+ /* Now we want to allocate and
+ zero-out any data from the end of
+ the region we mapped in from the
+ file (filesz) to the end of the
+ loadable segment (memsz). We may
+ need additional pages for memsz,
+ that we map in below, and we can
+ count on the kernel to zero them
+ out, but we have to zero out stuff
+ in the last page that we mapped in
+ from the file. However, we can't
+ assume to have actually obtained
+ full pages from the kernel, since
+ we didn't ask for them, and uClibc
+ may not give us full pages for
+ small allocations. So only zero
+ out up to memsz or the end of the
+ page, whichever comes first. */
+
+ /* CPNT is the beginning of the memsz
+ portion not backed by filesz. */
+ cpnt = (char *) (status + size);
+
+ /* MAP_SIZE is the address of the
+ beginning of the next page. */
+ map_size = (ppnt->p_vaddr + ppnt->p_filesz
+ + ADDR_ALIGN) & PAGE_ALIGN;
+
+#ifndef MIN
+# define MIN(a,b) ((a) < (b) ? (a) : (b))
+#endif
+ _dl_memset (cpnt, 0,
+ MIN (map_size
+ - (ppnt->p_vaddr
+ + ppnt->p_filesz),
+ ppnt->p_memsz
+ - ppnt->p_filesz));
+
+ if (map_size < ppnt->p_vaddr + ppnt->p_memsz
+ && !piclib2map) {
+ tryaddr = map_size + (char*)(piclib ? libaddr : 0);
+ status = (char *) _dl_mmap(tryaddr,
+ ppnt->p_vaddr + ppnt->p_memsz - map_size,
+ LXFLAGS(ppnt->p_flags), flags | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+ if (_dl_mmap_check_error(status)
+ || tryaddr != status)
+ goto cant_map;
+ }
+ } else {
+ tryaddr = (piclib == 2 ? 0
+ : (char *) (ppnt->p_vaddr & PAGE_ALIGN)
+ + (piclib ? libaddr : 0));
+ size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz;
+ status = (char *) _dl_mmap
+ (tryaddr, size, LXFLAGS(ppnt->p_flags),
+ flags | (piclib == 2 ? MAP_EXECUTABLE
+ | MAP_DENYWRITE : 0),
+ infile, ppnt->p_offset & OFFS_ALIGN);
+ if (_dl_mmap_check_error(status)
+ || (tryaddr && tryaddr != status))
+ goto cant_map;
+ DL_INIT_LOADADDR_HDR
+ (lib_loadaddr, status
+ + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
}
/* if (libaddr == 0 && piclib) {