From 56ea76b6bf190bffdc07aba90e4b25dfc096027b Mon Sep 17 00:00:00 2001 From: Bernd Schmidt Date: Mon, 11 Apr 2011 13:13:18 +0200 Subject: Fix nommu handling of DT_TEXTREL We have a problem with DT_TEXTREL shared libraries on nommu machines. The dynamic linker's strategy is to map the text segment read-only first, then look for DT_TEXTREL, and use mprotect to change protections if necessary. This fails on nommu, since a nommu kernel can decide to share the memory for private read-only file mappings, and mprotect doesn't (can't) do anything about this sharing. Existing nommu targets apparently have no need for this, but on C6X, we may need to assign library indices at run-time if no --dsbt-index option was passed to the linker at build time. Hence, the following patch, which instead of using mprotect, redoes the mapping with PF_W set. Signed-off-by: Bernd Schmidt --- ldso/include/dl-elf.h | 2 +- ldso/include/ldso.h | 2 + ldso/ldso/bfin/dl-inlines.h | 41 +++++-- ldso/ldso/bfin/dl-sysdep.h | 2 + ldso/ldso/c6x/dl-inlines.h | 29 +++++ ldso/ldso/c6x/dl-sysdep.h | 3 + ldso/ldso/dl-elf.c | 284 ++++++++++++++++++++++---------------------- ldso/ldso/frv/dl-inlines.h | 41 +++++-- ldso/ldso/frv/dl-sysdep.h | 2 + 9 files changed, 248 insertions(+), 158 deletions(-) (limited to 'ldso') diff --git a/ldso/include/dl-elf.h b/ldso/include/dl-elf.h index 7fbb373b4..e7203fd8f 100644 --- a/ldso/include/dl-elf.h +++ b/ldso/include/dl-elf.h @@ -184,7 +184,7 @@ unsigned int __dl_parse_dynamic_info(ElfW(Dyn) *dpnt, unsigned long dynamic_info ADJUST_DYN_INFO(DT_DSBT_BASE_IDX, load_off); /* Initialize loadmap dsbt info. */ - load_off.map->dsbt_table = dynamic_info[DT_DSBT_BASE_IDX]; + load_off.map->dsbt_table = (void *)dynamic_info[DT_DSBT_BASE_IDX]; load_off.map->dsbt_size = dynamic_info[DT_DSBT_SIZE_IDX]; load_off.map->dsbt_index = dynamic_info[DT_DSBT_INDEX_IDX]; #endif diff --git a/ldso/include/ldso.h b/ldso/include/ldso.h index 69b5dd75a..95bcd14a4 100644 --- a/ldso/include/ldso.h +++ b/ldso/include/ldso.h @@ -34,6 +34,8 @@ #include /* Pull in the arch specific page size */ #include +/* Pull in the MIN macro */ +#include /* Pull in the ldso syscalls and string functions */ #ifndef __ARCH_HAS_NO_SHARED__ #include diff --git a/ldso/ldso/bfin/dl-inlines.h b/ldso/ldso/bfin/dl-inlines.h index 6524f5edc..969986218 100644 --- a/ldso/ldso/bfin/dl-inlines.h +++ b/ldso/ldso/bfin/dl-inlines.h @@ -88,14 +88,39 @@ __dl_init_loadaddr_hdr (struct elf32_fdpic_loadaddr loadaddr, void *addr, segdata->p_memsz = phdr->p_memsz; #if defined (__SUPPORT_LD_DEBUG__) - { - extern char *_dl_debug; - extern int _dl_debug_file; - if (_dl_debug) - _dl_dprintf(_dl_debug_file, "%i: mapped %x at %x, size %x\n", - loadaddr.map->nsegs-1, - segdata->p_vaddr, segdata->addr, segdata->p_memsz); - } + if (_dl_debug) + _dl_dprintf(_dl_debug_file, "%i: mapped %x at %x, size %x\n", + loadaddr.map->nsegs-1, + segdata->p_vaddr, segdata->addr, segdata->p_memsz); +#endif +} + +/* Replace an existing entry in the load map. 
*/ +static __always_inline void +__dl_update_loadaddr_hdr (struct elf32_fdpic_loadaddr loadaddr, void *addr, + Elf32_Phdr *phdr) +{ + struct elf32_fdpic_loadseg *segdata; + void *oldaddr; + int i; + + for (i = 0; i < loadaddr.map->nsegs; i++) + if (loadaddr.map->segs[i].p_vaddr == phdr->p_vaddr + && loadaddr.map->segs[i].p_memsz == phdr->p_memsz) + break; + if (i == loadaddr.map->nsegs) + _dl_exit (-1); + + segdata = loadaddr.map->segs + i; + oldaddr = (void *)segdata->addr; + _dl_munmap (oldaddr, segdata->p_memsz); + segdata->addr = (Elf32_Addr) addr; + +#if defined (__SUPPORT_LD_DEBUG__) + if (_dl_debug) + _dl_dprintf(_dl_debug_file, "%i: changed mapping %x at %x (old %x), size %x\n", + loadaddr.map->nsegs-1, + segdata->p_vaddr, segdata->addr, oldaddr, segdata->p_memsz); #endif } diff --git a/ldso/ldso/bfin/dl-sysdep.h b/ldso/ldso/bfin/dl-sysdep.h index 50c750990..168e5c89a 100644 --- a/ldso/ldso/bfin/dl-sysdep.h +++ b/ldso/ldso/bfin/dl-sysdep.h @@ -120,6 +120,8 @@ struct funcdesc_ht; #define DL_INIT_LOADADDR_HDR(LOADADDR, ADDR, PHDR) \ (__dl_init_loadaddr_hdr ((LOADADDR), (ADDR), (PHDR), \ dl_init_loadaddr_load_count)) +#define DL_UPDATE_LOADADDR_HDR(LOADADDR, ADDR, PHDR) \ + (__dl_update_loadaddr_hdr ((LOADADDR), (ADDR), (PHDR))) #define DL_LOADADDR_UNMAP(LOADADDR, LEN) \ (__dl_loadaddr_unmap ((LOADADDR), (NULL))) #define DL_LIB_UNMAP(LIB, LEN) \ diff --git a/ldso/ldso/c6x/dl-inlines.h b/ldso/ldso/c6x/dl-inlines.h index d8fb42c55..62e1cc9ca 100644 --- a/ldso/ldso/c6x/dl-inlines.h +++ b/ldso/ldso/c6x/dl-inlines.h @@ -74,6 +74,35 @@ __dl_init_loadaddr_hdr (struct elf32_dsbt_loadaddr loadaddr, void *addr, #endif } +/* Replace an existing entry in the load map. */ +static __always_inline void +__dl_update_loadaddr_hdr (struct elf32_dsbt_loadaddr loadaddr, void *addr, + Elf32_Phdr *phdr) +{ + struct elf32_dsbt_loadseg *segdata; + void *oldaddr; + int i; + + for (i = 0; i < loadaddr.map->nsegs; i++) + if (loadaddr.map->segs[i].p_vaddr == phdr->p_vaddr + && loadaddr.map->segs[i].p_memsz == phdr->p_memsz) + break; + if (i == loadaddr.map->nsegs) + _dl_exit (-1); + + segdata = loadaddr.map->segs + i; + oldaddr = (void *)segdata->addr; + _dl_munmap (oldaddr, segdata->p_memsz); + segdata->addr = (Elf32_Addr) addr; + +#if defined (__SUPPORT_LD_DEBUG__) + if (_dl_debug) + _dl_dprintf(_dl_debug_file, "%i: changed mapping %x at %x (old %x), size %x\n", + loadaddr.map->nsegs-1, + segdata->p_vaddr, segdata->addr, oldaddr, segdata->p_memsz); +#endif +} + static __always_inline void __dl_loadaddr_unmap (struct elf32_dsbt_loadaddr loadaddr) { diff --git a/ldso/ldso/c6x/dl-sysdep.h b/ldso/ldso/c6x/dl-sysdep.h index 8f1b122d3..ff7accdf1 100644 --- a/ldso/ldso/c6x/dl-sysdep.h +++ b/ldso/ldso/c6x/dl-sysdep.h @@ -104,6 +104,9 @@ struct elf32_dsbt_loadaddr; (__dl_init_loadaddr_hdr ((LOADADDR), (ADDR), (PHDR), \ dl_init_loadaddr_load_count)) +#define DL_UPDATE_LOADADDR_HDR(LOADADDR, ADDR, PHDR) \ + (__dl_update_loadaddr_hdr ((LOADADDR), (ADDR), (PHDR))) + #define DL_LOADADDR_UNMAP(LOADADDR, LEN) \ (__dl_loadaddr_unmap ((LOADADDR))) diff --git a/ldso/ldso/dl-elf.c b/ldso/ldso/dl-elf.c index 505247e6f..91e8a97ca 100644 --- a/ldso/ldso/dl-elf.c +++ b/ldso/ldso/dl-elf.c @@ -314,6 +314,121 @@ goof: return NULL; } +/* + * Make a writeable mapping of a segment, regardless of whether PF_W is + * set or not. 
+ */
+static void *
+map_writeable (int infile, ElfW(Phdr) *ppnt, int piclib, int flags,
+	       unsigned long libaddr)
+{
+	int prot_flags = ppnt->p_flags | PF_W;
+	char *status, *retval;
+	char *tryaddr;
+	ssize_t size;
+	unsigned long map_size;
+	char *cpnt;
+	char *piclib2map = NULL;
+
+	if (piclib == 2 &&
+	    /* We might be able to avoid this call if memsz doesn't
+	       require an additional page, but this would require mmap
+	       to always return page-aligned addresses and a whole
+	       number of pages allocated.  Unfortunately, mmap on uClinux
+	       may return misaligned addresses and may allocate
+	       partial pages, so we may end up doing unnecessary mmap
+	       calls.
+
+	       This is what we could do if we knew mmap would always
+	       return aligned pages:
+
+	       ((ppnt->p_vaddr + ppnt->p_filesz + ADDR_ALIGN) &
+	       PAGE_ALIGN) < ppnt->p_vaddr + ppnt->p_memsz)
+
+	       Instead, we have to do this:  */
+	    ppnt->p_filesz < ppnt->p_memsz)
+	{
+		piclib2map = (char *)
+			_dl_mmap(0, (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_memsz,
+				 LXFLAGS(prot_flags), flags | MAP_ANONYMOUS, -1, 0);
+		if (_dl_mmap_check_error(piclib2map))
+			return 0;
+	}
+
+	tryaddr = piclib == 2 ? piclib2map
+		: ((char*) (piclib ? libaddr : 0)
+		   + (ppnt->p_vaddr & PAGE_ALIGN));
+
+	size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz;
+
+	/* On !MMU targets, an mmap to a fixed address will fail.
+	   So instead of desperately calling mmap and failing,
+	   we set status to MAP_FAILED and save ourselves the call
+	   to mmap ().  */
+#ifndef __ARCH_USE_MMU__
+	if (piclib2map == 0)
+#endif
+		status = (char *) _dl_mmap
+			(tryaddr, size, LXFLAGS(prot_flags),
+			 flags | (piclib2map ? MAP_FIXED : 0),
+			 infile, ppnt->p_offset & OFFS_ALIGN);
+#ifndef __ARCH_USE_MMU__
+	else
+		status = MAP_FAILED;
+#endif
+#ifdef _DL_PREAD
+	if (_dl_mmap_check_error(status) && piclib2map
+	    && (_DL_PREAD (infile, tryaddr, size,
+			   ppnt->p_offset & OFFS_ALIGN) == size))
+		status = tryaddr;
+#endif
+	if (_dl_mmap_check_error(status) || (tryaddr && tryaddr != status))
+		return 0;
+
+	if (piclib2map)
+		retval = piclib2map;
+	else
+		retval = status;
+
+	/* Now we want to allocate and zero-out any data from the end
+	   of the region we mapped in from the file (filesz) to the
+	   end of the loadable segment (memsz).  We may need
+	   additional pages for memsz, which we map in below, and we
+	   can count on the kernel to zero them out, but we have to
+	   zero out stuff in the last page that we mapped in from the
+	   file.  However, we can't assume to have actually obtained
+	   full pages from the kernel, since we didn't ask for them,
+	   and uClibc may not give us full pages for small
+	   allocations.  So only zero out up to memsz or the end of
+	   the page, whichever comes first.  */
+
+	/* CPNT is the beginning of the memsz portion not backed by
+	   filesz.  */
+	cpnt = (char *) (status + size);
+
+	/* MAP_SIZE is the address of the
+	   beginning of the next page.  */
+	map_size = (ppnt->p_vaddr + ppnt->p_filesz
+		    + ADDR_ALIGN) & PAGE_ALIGN;
+
+	_dl_memset (cpnt, 0,
+		    MIN (map_size
+			 - (ppnt->p_vaddr
+			    + ppnt->p_filesz),
+			 ppnt->p_memsz
+			 - ppnt->p_filesz));
+
+	if (map_size < ppnt->p_vaddr + ppnt->p_memsz && !piclib2map) {
+		tryaddr = map_size + (char*)(piclib ? 
libaddr : 0); + status = (char *) _dl_mmap(tryaddr, + ppnt->p_vaddr + ppnt->p_memsz - map_size, + LXFLAGS(prot_flags), + flags | MAP_ANONYMOUS | MAP_FIXED, -1, 0); + if (_dl_mmap_check_error(status) || tryaddr != status) + return NULL; + } + return retval; +} /* * Read one ELF library into memory, mmap it into the correct locations and @@ -475,6 +590,7 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, status = (char *) _dl_mmap((char *) (piclib ? 0 : minvma), maxvma - minvma, PROT_NONE, flags | MAP_ANONYMOUS, -1, 0); if (_dl_mmap_check_error(status)) { + cant_map: _dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname); _dl_internal_error_number = LD_ERROR_MMAP_FAILED; _dl_close(infile); @@ -495,8 +611,11 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, char *addr; addr = DL_MAP_SEGMENT (epnt, ppnt, infile, flags); - if (addr == NULL) + if (addr == NULL) { + cant_map1: + DL_LOADADDR_UNMAP (lib_loadaddr, maxvma - minvma); goto cant_map; + } DL_INIT_LOADADDR_HDR (lib_loadaddr, addr, ppnt); ppnt++; @@ -517,141 +636,9 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, } if (ppnt->p_flags & PF_W) { - unsigned long map_size; - char *cpnt; - char *piclib2map = 0; - - if (piclib == 2 && - /* We might be able to avoid this - call if memsz doesn't require - an additional page, but this - would require mmap to always - return page-aligned addresses - and a whole number of pages - allocated. Unfortunately on - uClinux may return misaligned - addresses and may allocate - partial pages, so we may end up - doing unnecessary mmap calls. - - This is what we could do if we - knew mmap would always return - aligned pages: - - ((ppnt->p_vaddr + ppnt->p_filesz - + ADDR_ALIGN) - & PAGE_ALIGN) - < ppnt->p_vaddr + ppnt->p_memsz) - - Instead, we have to do this: */ - ppnt->p_filesz < ppnt->p_memsz) - { - piclib2map = (char *) - _dl_mmap(0, (ppnt->p_vaddr & ADDR_ALIGN) - + ppnt->p_memsz, - LXFLAGS(ppnt->p_flags), - flags | MAP_ANONYMOUS, -1, 0); - if (_dl_mmap_check_error(piclib2map)) - goto cant_map; - DL_INIT_LOADADDR_HDR - (lib_loadaddr, piclib2map - + (ppnt->p_vaddr & ADDR_ALIGN), ppnt); - } - - tryaddr = piclib == 2 ? piclib2map - : ((char*) (piclib ? libaddr : 0) + - (ppnt->p_vaddr & PAGE_ALIGN)); - - size = (ppnt->p_vaddr & ADDR_ALIGN) - + ppnt->p_filesz; - - /* For !MMU, mmap to fixed address will fail. - So instead of desperately call mmap and fail, - we set status to MAP_FAILED to save a call - to mmap (). */ -#ifndef __ARCH_USE_MMU__ - if (piclib2map == 0) -#endif - status = (char *) _dl_mmap - (tryaddr, size, LXFLAGS(ppnt->p_flags), - flags | (piclib2map ? MAP_FIXED : 0), - infile, ppnt->p_offset & OFFS_ALIGN); -#ifndef __ARCH_USE_MMU__ - else - status = MAP_FAILED; -#endif -#ifdef _DL_PREAD - if (_dl_mmap_check_error(status) && piclib2map - && (_DL_PREAD (infile, tryaddr, size, - ppnt->p_offset & OFFS_ALIGN) - == size)) - status = tryaddr; -#endif - if (_dl_mmap_check_error(status) - || (tryaddr && tryaddr != status)) { - cant_map: - _dl_dprintf(2, "%s:%i: can't map '%s'\n", - _dl_progname, __LINE__, libname); - _dl_internal_error_number = LD_ERROR_MMAP_FAILED; - DL_LOADADDR_UNMAP (lib_loadaddr, maxvma - minvma); - _dl_close(infile); - _dl_munmap(header, _dl_pagesize); - return NULL; - } - - if (! 
piclib2map) { - DL_INIT_LOADADDR_HDR - (lib_loadaddr, status - + (ppnt->p_vaddr & ADDR_ALIGN), ppnt); - } - /* Now we want to allocate and - zero-out any data from the end of - the region we mapped in from the - file (filesz) to the end of the - loadable segment (memsz). We may - need additional pages for memsz, - that we map in below, and we can - count on the kernel to zero them - out, but we have to zero out stuff - in the last page that we mapped in - from the file. However, we can't - assume to have actually obtained - full pages from the kernel, since - we didn't ask for them, and uClibc - may not give us full pages for - small allocations. So only zero - out up to memsz or the end of the - page, whichever comes first. */ - - /* CPNT is the beginning of the memsz - portion not backed by filesz. */ - cpnt = (char *) (status + size); - - /* MAP_SIZE is the address of the - beginning of the next page. */ - map_size = (ppnt->p_vaddr + ppnt->p_filesz - + ADDR_ALIGN) & PAGE_ALIGN; - -#ifndef MIN -# define MIN(a,b) ((a) < (b) ? (a) : (b)) -#endif - _dl_memset (cpnt, 0, - MIN (map_size - - (ppnt->p_vaddr - + ppnt->p_filesz), - ppnt->p_memsz - - ppnt->p_filesz)); - - if (map_size < ppnt->p_vaddr + ppnt->p_memsz - && !piclib2map) { - tryaddr = map_size + (char*)(piclib ? libaddr : 0); - status = (char *) _dl_mmap(tryaddr, - ppnt->p_vaddr + ppnt->p_memsz - map_size, - LXFLAGS(ppnt->p_flags), flags | MAP_ANONYMOUS | MAP_FIXED, -1, 0); - if (_dl_mmap_check_error(status) - || tryaddr != status) - goto cant_map; - } + status = map_writeable (infile, ppnt, piclib, flags, libaddr); + if (status == NULL) + goto cant_map1; } else { tryaddr = (piclib == 2 ? 0 : (char *) (ppnt->p_vaddr & PAGE_ALIGN) @@ -664,11 +651,11 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, infile, ppnt->p_offset & OFFS_ALIGN); if (_dl_mmap_check_error(status) || (tryaddr && tryaddr != status)) - goto cant_map; - DL_INIT_LOADADDR_HDR - (lib_loadaddr, status - + (ppnt->p_vaddr & ADDR_ALIGN), ppnt); + goto cant_map1; } + DL_INIT_LOADADDR_HDR(lib_loadaddr, + status + (ppnt->p_vaddr & ADDR_ALIGN), + ppnt); /* if (libaddr == 0 && piclib) { libaddr = (unsigned long) status; @@ -677,7 +664,6 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, } ppnt++; } - _dl_close(infile); /* For a non-PIC library, the addresses are all absolute */ if (piclib) { @@ -696,6 +682,7 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, _dl_dprintf(2, "%s: '%s' is missing a dynamic section\n", _dl_progname, libname); _dl_munmap(header, _dl_pagesize); + _dl_close(infile); return NULL; } @@ -711,10 +698,23 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff]; for (i = 0; i < epnt->e_phnum; i++, ppnt++) { if (ppnt->p_type == PT_LOAD && !(ppnt->p_flags & PF_W)) { +#ifdef __ARCH_USE_MMU__ _dl_mprotect((void *) ((piclib ? 
libaddr : 0) + (ppnt->p_vaddr & PAGE_ALIGN)), (ppnt->p_vaddr & ADDR_ALIGN) + (unsigned long) ppnt->p_filesz, PROT_READ | PROT_WRITE | PROT_EXEC); +#else + void *new_addr; + new_addr = map_writeable (infile, ppnt, piclib, flags, libaddr); + if (!new_addr) { + _dl_dprintf(_dl_debug_file, "Can't modify %s's text section.", + libname); + _dl_exit(1); + } + DL_UPDATE_LOADADDR_HDR(lib_loadaddr, + new_addr + (ppnt->p_vaddr & ADDR_ALIGN), + ppnt); +#endif } } #else @@ -725,6 +725,8 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure, #endif } + _dl_close(infile); + tpnt = _dl_add_elf_hash_table(libname, lib_loadaddr, dynamic_info, dynamic_addr, 0); tpnt->relro_addr = relro_addr; diff --git a/ldso/ldso/frv/dl-inlines.h b/ldso/ldso/frv/dl-inlines.h index 95233a7c0..0395a7e23 100644 --- a/ldso/ldso/frv/dl-inlines.h +++ b/ldso/ldso/frv/dl-inlines.h @@ -72,14 +72,39 @@ __dl_init_loadaddr_hdr (struct elf32_fdpic_loadaddr loadaddr, void *addr, segdata->p_memsz = phdr->p_memsz; #if defined (__SUPPORT_LD_DEBUG__) - { - extern char *_dl_debug; - extern int _dl_debug_file; - if (_dl_debug) - _dl_dprintf(_dl_debug_file, "%i: mapped %x at %x, size %x\n", - loadaddr.map->nsegs-1, - segdata->p_vaddr, segdata->addr, segdata->p_memsz); - } + if (_dl_debug) + _dl_dprintf(_dl_debug_file, "%i: mapped %x at %x, size %x\n", + loadaddr.map->nsegs-1, + segdata->p_vaddr, segdata->addr, segdata->p_memsz); +#endif +} + +/* Replace an existing entry in the load map. */ +static __always_inline void +__dl_update_loadaddr_hdr (struct elf32_fdpic_loadaddr loadaddr, void *addr, + Elf32_Phdr *phdr) +{ + struct elf32_fdpic_loadseg *segdata; + void *oldaddr; + int i; + + for (i = 0; i < loadaddr.map->nsegs; i++) + if (loadaddr.map->segs[i].p_vaddr == phdr->p_vaddr + && loadaddr.map->segs[i].p_memsz == phdr->p_memsz) + break; + if (i == loadaddr.map->nsegs) + _dl_exit (-1); + + segdata = loadaddr.map->segs + i; + oldaddr = (void *)segdata->addr; + _dl_munmap (oldaddr, segdata->p_memsz); + segdata->addr = (Elf32_Addr) addr; + +#if defined (__SUPPORT_LD_DEBUG__) + if (_dl_debug) + _dl_dprintf(_dl_debug_file, "%i: changed mapping %x at %x (old %x), size %x\n", + loadaddr.map->nsegs-1, + segdata->p_vaddr, segdata->addr, oldaddr, segdata->p_memsz); #endif } diff --git a/ldso/ldso/frv/dl-sysdep.h b/ldso/ldso/frv/dl-sysdep.h index e9c847a69..206a66247 100644 --- a/ldso/ldso/frv/dl-sysdep.h +++ b/ldso/ldso/frv/dl-sysdep.h @@ -95,6 +95,8 @@ struct funcdesc_ht; #define DL_INIT_LOADADDR_HDR(LOADADDR, ADDR, PHDR) \ (__dl_init_loadaddr_hdr ((LOADADDR), (ADDR), (PHDR), \ dl_init_loadaddr_load_count)) +#define DL_UPDATE_LOADADDR_HDR(LOADADDR, ADDR, PHDR) \ + (__dl_update_loadaddr_hdr ((LOADADDR), (ADDR), (PHDR))) #define DL_LOADADDR_UNMAP(LOADADDR, LEN) \ (__dl_loadaddr_unmap ((LOADADDR), (NULL))) #define DL_LIB_UNMAP(LIB, LEN) \ -- cgit v1.2.3 From feb7ce46ef24f74ebf0235f10127bd49f0c7e675 Mon Sep 17 00:00:00 2001 From: Bernd Schmidt Date: Mon, 11 Apr 2011 13:21:23 +0200 Subject: Support dynamic assignment of DSBT_INDEX For DSBT targets (C6X only at this point), we'd like to support the case where the user did not specify --dsbt-index at link time when building a shared library. The dynamic linker can still assign an index at runtime and fix up the DSBT_INDEX relocs, at the cost of startup time and memory space. 
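As the elfinterp.c hunk below shows, an R_C6000_DSBT_INDEX reloc packs
the 15-bit index into bits 8..22 of the instruction word, so assigning
an index at runtime is a matter of rewriting that bit-field in the text
segment.  A standalone sketch of the fixup arithmetic (the values here
are made-up examples):

	unsigned int old_val = 0x0a121054;	/* original instruction word */
	unsigned int idx = 5;			/* DSBT index assigned at runtime */
	unsigned int new_val;

	new_val = (old_val & ~0x007fff00)	/* clear the 15-bit index field */
		| ((idx & 0x7fff) << 8);	/* insert the new index at bit 8 */

Rewriting instruction words is also why this only works for libraries
whose text segment can be made writable, i.e. DT_TEXTREL ones as
handled by the previous patch.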
Signed-off-by: Bernd Schmidt
---
 ldso/ldso/c6x/elfinterp.c |  6 +++++-
 ldso/ldso/dl-elf.c        | 34 +++++++++++++++++++++++++++++-----
 2 files changed, 34 insertions(+), 6 deletions(-)
(limited to 'ldso')

diff --git a/ldso/ldso/c6x/elfinterp.c b/ldso/ldso/c6x/elfinterp.c
index 7c79171ce..f5d3ad41e 100644
--- a/ldso/ldso/c6x/elfinterp.c
+++ b/ldso/ldso/c6x/elfinterp.c
@@ -198,6 +198,10 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope,
 		new_val = sym_val;
 		*reloc_addr = sym_val;
 		break;
+	case R_C6000_DSBT_INDEX:
+		new_val = (old_val & ~0x007fff00) | ((tpnt->loadaddr.map->dsbt_index & 0x7fff) << 8);
+		*reloc_addr = new_val;
+		break;
 	case R_C6000_ABS_L16:
 		new_val = (old_val & ~0x007fff80) | ((sym_val & 0xffff) << 7);
 		*reloc_addr = new_val;
@@ -224,7 +228,7 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope,
 				  (char *)symbol_addr,
 				  symtab[symtab_index].st_size);
 		}
-		break;
+		return 0;
 	default:
 		return -1; /*call _dl_exit(1) */
 	}
diff --git a/ldso/ldso/dl-elf.c b/ldso/ldso/dl-elf.c
index 91e8a97ca..7b5d75146 100644
--- a/ldso/ldso/dl-elf.c
+++ b/ldso/ldso/dl-elf.c
@@ -811,20 +811,44 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
 #ifdef __DSBT__
 	/* Handle DSBT initialization */
 	{
-		struct elf_resolve *t, *ref = NULL;
+		struct elf_resolve *t, *ref;
 		int idx = tpnt->loadaddr.map->dsbt_index;
 		unsigned *dsbt = tpnt->loadaddr.map->dsbt_table;
 
 		if (idx == 0) {
-			/* This DSO has not been assigned an index */
-			_dl_dprintf(2, "%s: '%s' is missing a dsbt index assignment!\n",
-				    _dl_progname, libname);
-			_dl_exit(1);
+			if (!dynamic_info[DT_TEXTREL]) {
+				/* This DSO has not been assigned an index.  */
+				_dl_dprintf(2, "%s: '%s' is missing a dsbt index assignment!\n",
+					    _dl_progname, libname);
+				_dl_exit(1);
+			}
+			/* Find a dsbt table from another module.  */
+			ref = NULL;
+			for (t = _dl_loaded_modules; t; t = t->next) {
+				if (ref == NULL && t != tpnt) {
+					ref = t;
+					break;
+				}
+			}
+			idx = tpnt->loadaddr.map->dsbt_size;
+			while (idx-- > 0)
+				if (!ref || ref->loadaddr.map->dsbt_table[idx] == NULL)
+					break;
+			if (idx <= 0) {
+				_dl_dprintf(2, "%s: '%s' caused DSBT table overflow!\n",
+					    _dl_progname, libname);
+				_dl_exit(1);
+			}
+			_dl_if_debug_dprint("\n\tfile='%s'; assigned index %d\n",
+					    libname, idx);
+			tpnt->loadaddr.map->dsbt_index = idx;
+
 		}
 
 		/*
 		 * Setup dsbt slot for this module in dsbt of all modules.
 		 */
+		ref = NULL;
 		for (t = _dl_loaded_modules; t; t = t->next) {
 			/* find a dsbt table from another module */
 			if (ref == NULL && t != tpnt) {
--
cgit v1.2.3


From b228ddac5b221b7c474ed902bad124934e61a527 Mon Sep 17 00:00:00 2001
From: Maksim Rayskiy
Date: Fri, 15 Apr 2011 10:25:40 -0700
Subject: MIPS LDSO: pass sym_ref parameter to _dl_find_hash() to support PROTECTED symbols

_dl_find_hash() relies on the sym_ref parameter to check whether the
looked-up symbol is protected.  This change fixes a case where
_dl_perform_mips_global_got_relocations() was calling _dl_find_hash()
without providing the sym_ref parameter.

The bug caused hangs when a library exporting a non-protected symbol
appeared earlier in link order than a library declaring the same
symbol as protected.
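In a minimal sketch (using the same names as the hunk below), the fix
amounts to passing a stack-allocated symbol_ref instead of NULL:

	struct symbol_ref sym_ref;
	sym_ref.sym = sym;	/* symbol table entry being resolved */
	sym_ref.tpnt = NULL;	/* defining module, filled in by _dl_find_hash() */
	*got_entry = (unsigned long) _dl_find_hash(strtab + sym->st_name,
			tpnt->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT, &sym_ref);

With &sym_ref available, _dl_find_hash() can perform its
protected-symbol check, which avoids the hang described above.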
Signed-off-by: Maksim Rayskiy Signed-off-by: Carmelo Amoroso --- ldso/ldso/mips/elfinterp.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'ldso') diff --git a/ldso/ldso/mips/elfinterp.c b/ldso/ldso/mips/elfinterp.c index 2886f333d..82f740d53 100644 --- a/ldso/ldso/mips/elfinterp.c +++ b/ldso/ldso/mips/elfinterp.c @@ -378,8 +378,11 @@ void _dl_perform_mips_global_got_relocations(struct elf_resolve *tpnt, int lazy) *got_entry += (unsigned long) tpnt->loadaddr; } else { + struct symbol_ref sym_ref; + sym_ref.sym = sym; + sym_ref.tpnt = NULL; *got_entry = (unsigned long) _dl_find_hash(strtab + - sym->st_name, tpnt->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT, NULL); + sym->st_name, tpnt->symbol_scope, tpnt, ELF_RTYPE_CLASS_PLT, &sym_ref); } got_entry++; -- cgit v1.2.3 From 4580f142b4b1ce176320fe5c2c5bee09871a01e7 Mon Sep 17 00:00:00 2001 From: Kevin Cernekee Date: Sat, 16 Apr 2011 10:02:00 -0700 Subject: dl-startup: fix typos in block comment Signed-off-by: Kevin Cernekee Signed-off-by: Carmelo Amoroso --- ldso/ldso/dl-startup.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'ldso') diff --git a/ldso/ldso/dl-startup.c b/ldso/ldso/dl-startup.c index a51b583a4..449266077 100644 --- a/ldso/ldso/dl-startup.c +++ b/ldso/ldso/dl-startup.c @@ -32,8 +32,8 @@ /* * The main trick with this program is that initially, we ourselves are not - * dynamicly linked. This means that we cannot access any global variables or - * call any functions. No globals initially, since the Global Offset Table + * dynamically linked. This means that we cannot access any global variables + * or call any functions. No globals initially, since the Global Offset Table * (GOT) is initialized by the linker assuming a virtual address of 0, and no * function calls initially since the Procedure Linkage Table (PLT) is not yet * initialized. @@ -55,12 +55,12 @@ * * Fortunately, the linker itself leaves a few clues lying around, and when the * kernel starts the image, there are a few further clues. First of all, there - * is Auxiliary Vector Table information sitting on which is provided to us by - * the kernel, and which includes information about the load address that the - * program interpreter was loaded at, the number of sections, the address the - * application was loaded at and so forth. Here this information is stored in - * the array auxvt. For details see linux/fs/binfmt_elf.c where it calls - * NEW_AUX_ENT() a bunch of time.... + * is Auxiliary Vector Table information sitting on the stack which is provided + * to us by the kernel, and which includes information about the address + * that the program interpreter was loaded at, the number of sections, the + * address the application was loaded at, and so forth. Here this information + * is stored in the array auxvt. For details see linux/fs/binfmt_elf.c where + * it calls NEW_AUX_ENT() a bunch of times.... * * Next, we need to find the GOT. 
On most arches there is a register pointing * to the GOT, but just in case (and for new ports) I've added some (slow) C -- cgit v1.2.3 From c5f87c865e72d527c85b879be67b5a6fc0cab349 Mon Sep 17 00:00:00 2001 From: Bernhard Reutner-Fischer Date: Tue, 3 May 2011 16:36:15 +0200 Subject: x86_64: silence warning if !TLS TODO: fix all other arches Signed-off-by: Bernhard Reutner-Fischer --- ldso/ldso/x86_64/elfinterp.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'ldso') diff --git a/ldso/ldso/x86_64/elfinterp.c b/ldso/ldso/x86_64/elfinterp.c index 27b1a15fd..efe9d546c 100644 --- a/ldso/ldso/x86_64/elfinterp.c +++ b/ldso/ldso/x86_64/elfinterp.c @@ -157,7 +157,9 @@ _dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, int reloc_type; int symtab_index; char *symname; - struct elf_resolve *tls_tpnt = NULL; +#if defined USE_TLS && USE_TLS + struct elf_resolve *tls_tpnt; +#endif struct symbol_ref sym_ref; ElfW(Addr) *reloc_addr; ElfW(Addr) symbol_addr; @@ -186,13 +188,17 @@ _dl_do_reloc(struct elf_resolve *tpnt, struct dyn_elf *scope, /* This may be non-fatal if called from dlopen. */ return 1; } +#if defined USE_TLS && USE_TLS tls_tpnt = sym_ref.tpnt; +#endif } else { /* Relocs against STN_UNDEF are usually treated as using a * symbol value of zero, and using the module containing the * reloc itself. */ symbol_addr = sym_ref.sym->st_value; +#if defined USE_TLS && USE_TLS tls_tpnt = tpnt; +#endif } -- cgit v1.2.3
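With TLS disabled, tls_tpnt in the old code was assigned but never read
afterwards, which is what triggered the warning (presumably GCC's
"variable set but not used" diagnostic).  A minimal, hypothetical
illustration of the guard pattern applied above:

	int f(int x)
	{
	#if defined USE_TLS && USE_TLS
		int t;		/* only declared when this path consumes it */
		t = x + 1;	/* stand-in for the TLS-only bookkeeping */
		return t;
	#else
		return x + 1;	/* no set-but-unused variable when TLS is off */
	#endif
	}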