author    Bernd Schmidt <bernds@codesourcery.com>    2011-04-11 13:13:18 +0200
committer Bernd Schmidt <bernds@codesourcery.com>    2011-04-11 13:26:48 +0200
commit    56ea76b6bf190bffdc07aba90e4b25dfc096027b
tree      8644959d652ecf68e190f0652cff04f44e88f715
parent    85f4b028d767fc390a7b866d2f58d58be489242d
Fix nommu handling of DT_TEXTREL
We have a problem with DT_TEXTREL shared libraries on nommu machines. The dynamic linker's strategy is to map the text segment read-only first, then look for DT_TEXTREL, and use mprotect to change protections if necessary. This fails on nommu, since a nommu kernel may decide to share the memory of private read-only file mappings, and mprotect doesn't (can't) do anything about that sharing.

Existing nommu targets apparently have no need for this, but on C6X we may need to assign library indices at run-time if no --dsbt-index option was passed to the linker at build time. Hence the following patch, which, instead of using mprotect, redoes the mapping with PF_W set.

Signed-off-by: Bernd Schmidt <bernds@codesourcery.com>
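To make the strategy concrete before diving into the diff, here is a minimal sketch of the idea, assuming plain POSIX mmap/mprotect rather than the ldso-internal _dl_mmap/_dl_munmap wrappers; remap_text_writable() is a hypothetical helper used only for illustration, not code from the patch:

#include <stddef.h>
#include <sys/types.h>
#include <sys/mman.h>

/* Hypothetical helper, illustration only: return the (possibly new)
   address of a text segment that must become writable, or NULL on
   failure. */
static void *remap_text_writable(int fd, void *old_addr, size_t len, off_t off)
{
#ifdef __ARCH_USE_MMU__
	/* MMU case: our private mapping is ours alone, so flipping the
	   protection in place is sufficient. */
	if (mprotect(old_addr, len, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
		return NULL;
	return old_addr;
#else
	/* nommu case: the kernel may have shared the read-only file
	   mapping with other users of the library, and mprotect cannot
	   undo that sharing.  Map the same file range again, writable
	   this time, drop the old mapping, and let the caller record
	   the new address in its load map. */
	void *new_addr = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_EXEC,
			      MAP_PRIVATE, fd, off);
	if (new_addr == MAP_FAILED)
		return NULL;
	munmap(old_addr, len);
	return new_addr;
#endif
}

In the patch itself, the nommu branch is implemented by the new map_writeable() function in ldso/ldso/dl-elf.c, and the new per-arch DL_UPDATE_LOADADDR_HDR hook records the replacement address in the FDPIC/DSBT load map.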
-rw-r--r--  ldso/include/dl-elf.h       |   2
-rw-r--r--  ldso/include/ldso.h         |   2
-rw-r--r--  ldso/ldso/bfin/dl-inlines.h |  41
-rw-r--r--  ldso/ldso/bfin/dl-sysdep.h  |   2
-rw-r--r--  ldso/ldso/c6x/dl-inlines.h  |  29
-rw-r--r--  ldso/ldso/c6x/dl-sysdep.h   |   3
-rw-r--r--  ldso/ldso/dl-elf.c          | 284
-rw-r--r--  ldso/ldso/frv/dl-inlines.h  |  41
-rw-r--r--  ldso/ldso/frv/dl-sysdep.h   |   2
9 files changed, 248 insertions(+), 158 deletions(-)
diff --git a/ldso/include/dl-elf.h b/ldso/include/dl-elf.h
index 7fbb373b4..e7203fd8f 100644
--- a/ldso/include/dl-elf.h
+++ b/ldso/include/dl-elf.h
@@ -184,7 +184,7 @@ unsigned int __dl_parse_dynamic_info(ElfW(Dyn) *dpnt, unsigned long dynamic_info
ADJUST_DYN_INFO(DT_DSBT_BASE_IDX, load_off);
/* Initialize loadmap dsbt info. */
- load_off.map->dsbt_table = dynamic_info[DT_DSBT_BASE_IDX];
+ load_off.map->dsbt_table = (void *)dynamic_info[DT_DSBT_BASE_IDX];
load_off.map->dsbt_size = dynamic_info[DT_DSBT_SIZE_IDX];
load_off.map->dsbt_index = dynamic_info[DT_DSBT_INDEX_IDX];
#endif
diff --git a/ldso/include/ldso.h b/ldso/include/ldso.h
index 69b5dd75a..95bcd14a4 100644
--- a/ldso/include/ldso.h
+++ b/ldso/include/ldso.h
@@ -34,6 +34,8 @@
#include <sys/types.h>
/* Pull in the arch specific page size */
#include <bits/uClibc_page.h>
+/* Pull in the MIN macro */
+#include <sys/param.h>
/* Pull in the ldso syscalls and string functions */
#ifndef __ARCH_HAS_NO_SHARED__
#include <dl-syscall.h>
diff --git a/ldso/ldso/bfin/dl-inlines.h b/ldso/ldso/bfin/dl-inlines.h
index 6524f5edc..969986218 100644
--- a/ldso/ldso/bfin/dl-inlines.h
+++ b/ldso/ldso/bfin/dl-inlines.h
@@ -88,14 +88,39 @@ __dl_init_loadaddr_hdr (struct elf32_fdpic_loadaddr loadaddr, void *addr,
segdata->p_memsz = phdr->p_memsz;
#if defined (__SUPPORT_LD_DEBUG__)
- {
- extern char *_dl_debug;
- extern int _dl_debug_file;
- if (_dl_debug)
- _dl_dprintf(_dl_debug_file, "%i: mapped %x at %x, size %x\n",
- loadaddr.map->nsegs-1,
- segdata->p_vaddr, segdata->addr, segdata->p_memsz);
- }
+ if (_dl_debug)
+ _dl_dprintf(_dl_debug_file, "%i: mapped %x at %x, size %x\n",
+ loadaddr.map->nsegs-1,
+ segdata->p_vaddr, segdata->addr, segdata->p_memsz);
+#endif
+}
+
+/* Replace an existing entry in the load map. */
+static __always_inline void
+__dl_update_loadaddr_hdr (struct elf32_fdpic_loadaddr loadaddr, void *addr,
+ Elf32_Phdr *phdr)
+{
+ struct elf32_fdpic_loadseg *segdata;
+ void *oldaddr;
+ int i;
+
+ for (i = 0; i < loadaddr.map->nsegs; i++)
+ if (loadaddr.map->segs[i].p_vaddr == phdr->p_vaddr
+ && loadaddr.map->segs[i].p_memsz == phdr->p_memsz)
+ break;
+ if (i == loadaddr.map->nsegs)
+ _dl_exit (-1);
+
+ segdata = loadaddr.map->segs + i;
+ oldaddr = (void *)segdata->addr;
+ _dl_munmap (oldaddr, segdata->p_memsz);
+ segdata->addr = (Elf32_Addr) addr;
+
+#if defined (__SUPPORT_LD_DEBUG__)
+ if (_dl_debug)
+ _dl_dprintf(_dl_debug_file, "%i: changed mapping %x at %x (old %x), size %x\n",
+		    i,
+ segdata->p_vaddr, segdata->addr, oldaddr, segdata->p_memsz);
#endif
}
diff --git a/ldso/ldso/bfin/dl-sysdep.h b/ldso/ldso/bfin/dl-sysdep.h
index 50c750990..168e5c89a 100644
--- a/ldso/ldso/bfin/dl-sysdep.h
+++ b/ldso/ldso/bfin/dl-sysdep.h
@@ -120,6 +120,8 @@ struct funcdesc_ht;
#define DL_INIT_LOADADDR_HDR(LOADADDR, ADDR, PHDR) \
(__dl_init_loadaddr_hdr ((LOADADDR), (ADDR), (PHDR), \
dl_init_loadaddr_load_count))
+#define DL_UPDATE_LOADADDR_HDR(LOADADDR, ADDR, PHDR) \
+ (__dl_update_loadaddr_hdr ((LOADADDR), (ADDR), (PHDR)))
#define DL_LOADADDR_UNMAP(LOADADDR, LEN) \
(__dl_loadaddr_unmap ((LOADADDR), (NULL)))
#define DL_LIB_UNMAP(LIB, LEN) \
diff --git a/ldso/ldso/c6x/dl-inlines.h b/ldso/ldso/c6x/dl-inlines.h
index d8fb42c55..62e1cc9ca 100644
--- a/ldso/ldso/c6x/dl-inlines.h
+++ b/ldso/ldso/c6x/dl-inlines.h
@@ -74,6 +74,35 @@ __dl_init_loadaddr_hdr (struct elf32_dsbt_loadaddr loadaddr, void *addr,
#endif
}
+/* Replace an existing entry in the load map. */
+static __always_inline void
+__dl_update_loadaddr_hdr (struct elf32_dsbt_loadaddr loadaddr, void *addr,
+ Elf32_Phdr *phdr)
+{
+ struct elf32_dsbt_loadseg *segdata;
+ void *oldaddr;
+ int i;
+
+ for (i = 0; i < loadaddr.map->nsegs; i++)
+ if (loadaddr.map->segs[i].p_vaddr == phdr->p_vaddr
+ && loadaddr.map->segs[i].p_memsz == phdr->p_memsz)
+ break;
+ if (i == loadaddr.map->nsegs)
+ _dl_exit (-1);
+
+ segdata = loadaddr.map->segs + i;
+ oldaddr = (void *)segdata->addr;
+ _dl_munmap (oldaddr, segdata->p_memsz);
+ segdata->addr = (Elf32_Addr) addr;
+
+#if defined (__SUPPORT_LD_DEBUG__)
+ if (_dl_debug)
+ _dl_dprintf(_dl_debug_file, "%i: changed mapping %x at %x (old %x), size %x\n",
+		    i,
+ segdata->p_vaddr, segdata->addr, oldaddr, segdata->p_memsz);
+#endif
+}
+
static __always_inline void
__dl_loadaddr_unmap (struct elf32_dsbt_loadaddr loadaddr)
{
diff --git a/ldso/ldso/c6x/dl-sysdep.h b/ldso/ldso/c6x/dl-sysdep.h
index 8f1b122d3..ff7accdf1 100644
--- a/ldso/ldso/c6x/dl-sysdep.h
+++ b/ldso/ldso/c6x/dl-sysdep.h
@@ -104,6 +104,9 @@ struct elf32_dsbt_loadaddr;
(__dl_init_loadaddr_hdr ((LOADADDR), (ADDR), (PHDR), \
dl_init_loadaddr_load_count))
+#define DL_UPDATE_LOADADDR_HDR(LOADADDR, ADDR, PHDR) \
+ (__dl_update_loadaddr_hdr ((LOADADDR), (ADDR), (PHDR)))
+
#define DL_LOADADDR_UNMAP(LOADADDR, LEN) \
(__dl_loadaddr_unmap ((LOADADDR)))
diff --git a/ldso/ldso/dl-elf.c b/ldso/ldso/dl-elf.c
index 505247e6f..91e8a97ca 100644
--- a/ldso/ldso/dl-elf.c
+++ b/ldso/ldso/dl-elf.c
@@ -314,6 +314,121 @@ goof:
return NULL;
}
+/*
+ * Make a writeable mapping of a segment, regardless of whether PF_W is
+ * set or not.
+ */
+static void *
+map_writeable (int infile, ElfW(Phdr) *ppnt, int piclib, int flags,
+ unsigned long libaddr)
+{
+ int prot_flags = ppnt->p_flags | PF_W;
+ char *status, *retval;
+ char *tryaddr;
+ ssize_t size;
+ unsigned long map_size;
+ char *cpnt;
+ char *piclib2map = NULL;
+
+ if (piclib == 2 &&
+ /* We might be able to avoid this call if memsz doesn't
+ require an additional page, but this would require mmap
+ to always return page-aligned addresses and a whole
+	   number of pages allocated.  Unfortunately, on uClinux,
+	   mmap may return misaligned addresses and may allocate
+ partial pages, so we may end up doing unnecessary mmap
+ calls.
+
+ This is what we could do if we knew mmap would always
+ return aligned pages:
+
+ ((ppnt->p_vaddr + ppnt->p_filesz + ADDR_ALIGN) &
+ PAGE_ALIGN) < ppnt->p_vaddr + ppnt->p_memsz)
+
+ Instead, we have to do this: */
+ ppnt->p_filesz < ppnt->p_memsz)
+ {
+ piclib2map = (char *)
+ _dl_mmap(0, (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_memsz,
+ LXFLAGS(prot_flags), flags | MAP_ANONYMOUS, -1, 0);
+ if (_dl_mmap_check_error(piclib2map))
+ return 0;
+ }
+
+ tryaddr = piclib == 2 ? piclib2map
+ : ((char*) (piclib ? libaddr : 0) +
+ (ppnt->p_vaddr & PAGE_ALIGN));
+
+ size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz;
+
+	/* For !MMU, an mmap to a fixed address will fail.
+	   So instead of desperately calling mmap only to have
+	   it fail, we set status to MAP_FAILED to save a call
+	   to mmap(). */
+#ifndef __ARCH_USE_MMU__
+ if (piclib2map == 0)
+#endif
+ status = (char *) _dl_mmap
+ (tryaddr, size, LXFLAGS(prot_flags),
+ flags | (piclib2map ? MAP_FIXED : 0),
+ infile, ppnt->p_offset & OFFS_ALIGN);
+#ifndef __ARCH_USE_MMU__
+ else
+ status = MAP_FAILED;
+#endif
+#ifdef _DL_PREAD
+ if (_dl_mmap_check_error(status) && piclib2map
+ && (_DL_PREAD (infile, tryaddr, size,
+ ppnt->p_offset & OFFS_ALIGN) == size))
+ status = tryaddr;
+#endif
+ if (_dl_mmap_check_error(status) || (tryaddr && tryaddr != status))
+ return 0;
+
+ if (piclib2map)
+ retval = piclib2map;
+ else
+ retval = status;
+
+ /* Now we want to allocate and zero-out any data from the end
+ of the region we mapped in from the file (filesz) to the
+ end of the loadable segment (memsz). We may need
+ additional pages for memsz, that we map in below, and we
+ can count on the kernel to zero them out, but we have to
+ zero out stuff in the last page that we mapped in from the
+	   file.  However, we can't assume we actually obtained
+	   full pages from the kernel, since we didn't ask for them,
+ and uClibc may not give us full pages for small
+ allocations. So only zero out up to memsz or the end of
+ the page, whichever comes first. */
+
+ /* CPNT is the beginning of the memsz portion not backed by
+ filesz. */
+ cpnt = (char *) (status + size);
+
+ /* MAP_SIZE is the address of the
+ beginning of the next page. */
+ map_size = (ppnt->p_vaddr + ppnt->p_filesz
+ + ADDR_ALIGN) & PAGE_ALIGN;
+
+ _dl_memset (cpnt, 0,
+ MIN (map_size
+ - (ppnt->p_vaddr
+ + ppnt->p_filesz),
+ ppnt->p_memsz
+ - ppnt->p_filesz));
+
+ if (map_size < ppnt->p_vaddr + ppnt->p_memsz && !piclib2map) {
+ tryaddr = map_size + (char*)(piclib ? libaddr : 0);
+ status = (char *) _dl_mmap(tryaddr,
+ ppnt->p_vaddr + ppnt->p_memsz - map_size,
+ LXFLAGS(prot_flags),
+ flags | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+ if (_dl_mmap_check_error(status) || tryaddr != status)
+ return NULL;
+ }
+ return retval;
+}
/*
* Read one ELF library into memory, mmap it into the correct locations and
@@ -475,6 +590,7 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
status = (char *) _dl_mmap((char *) (piclib ? 0 : minvma),
maxvma - minvma, PROT_NONE, flags | MAP_ANONYMOUS, -1, 0);
if (_dl_mmap_check_error(status)) {
+ cant_map:
_dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname);
_dl_internal_error_number = LD_ERROR_MMAP_FAILED;
_dl_close(infile);
@@ -495,8 +611,11 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
char *addr;
addr = DL_MAP_SEGMENT (epnt, ppnt, infile, flags);
- if (addr == NULL)
+ if (addr == NULL) {
+ cant_map1:
+ DL_LOADADDR_UNMAP (lib_loadaddr, maxvma - minvma);
goto cant_map;
+ }
DL_INIT_LOADADDR_HDR (lib_loadaddr, addr, ppnt);
ppnt++;
@@ -517,141 +636,9 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
}
if (ppnt->p_flags & PF_W) {
- unsigned long map_size;
- char *cpnt;
- char *piclib2map = 0;
-
- if (piclib == 2 &&
- /* We might be able to avoid this
- call if memsz doesn't require
- an additional page, but this
- would require mmap to always
- return page-aligned addresses
- and a whole number of pages
- allocated. Unfortunately on
- uClinux may return misaligned
- addresses and may allocate
- partial pages, so we may end up
- doing unnecessary mmap calls.
-
- This is what we could do if we
- knew mmap would always return
- aligned pages:
-
- ((ppnt->p_vaddr + ppnt->p_filesz
- + ADDR_ALIGN)
- & PAGE_ALIGN)
- < ppnt->p_vaddr + ppnt->p_memsz)
-
- Instead, we have to do this: */
- ppnt->p_filesz < ppnt->p_memsz)
- {
- piclib2map = (char *)
- _dl_mmap(0, (ppnt->p_vaddr & ADDR_ALIGN)
- + ppnt->p_memsz,
- LXFLAGS(ppnt->p_flags),
- flags | MAP_ANONYMOUS, -1, 0);
- if (_dl_mmap_check_error(piclib2map))
- goto cant_map;
- DL_INIT_LOADADDR_HDR
- (lib_loadaddr, piclib2map
- + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
- }
-
- tryaddr = piclib == 2 ? piclib2map
- : ((char*) (piclib ? libaddr : 0) +
- (ppnt->p_vaddr & PAGE_ALIGN));
-
- size = (ppnt->p_vaddr & ADDR_ALIGN)
- + ppnt->p_filesz;
-
- /* For !MMU, mmap to fixed address will fail.
- So instead of desperately call mmap and fail,
- we set status to MAP_FAILED to save a call
- to mmap (). */
-#ifndef __ARCH_USE_MMU__
- if (piclib2map == 0)
-#endif
- status = (char *) _dl_mmap
- (tryaddr, size, LXFLAGS(ppnt->p_flags),
- flags | (piclib2map ? MAP_FIXED : 0),
- infile, ppnt->p_offset & OFFS_ALIGN);
-#ifndef __ARCH_USE_MMU__
- else
- status = MAP_FAILED;
-#endif
-#ifdef _DL_PREAD
- if (_dl_mmap_check_error(status) && piclib2map
- && (_DL_PREAD (infile, tryaddr, size,
- ppnt->p_offset & OFFS_ALIGN)
- == size))
- status = tryaddr;
-#endif
- if (_dl_mmap_check_error(status)
- || (tryaddr && tryaddr != status)) {
- cant_map:
- _dl_dprintf(2, "%s:%i: can't map '%s'\n",
- _dl_progname, __LINE__, libname);
- _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
- DL_LOADADDR_UNMAP (lib_loadaddr, maxvma - minvma);
- _dl_close(infile);
- _dl_munmap(header, _dl_pagesize);
- return NULL;
- }
-
- if (! piclib2map) {
- DL_INIT_LOADADDR_HDR
- (lib_loadaddr, status
- + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
- }
- /* Now we want to allocate and
- zero-out any data from the end of
- the region we mapped in from the
- file (filesz) to the end of the
- loadable segment (memsz). We may
- need additional pages for memsz,
- that we map in below, and we can
- count on the kernel to zero them
- out, but we have to zero out stuff
- in the last page that we mapped in
- from the file. However, we can't
- assume to have actually obtained
- full pages from the kernel, since
- we didn't ask for them, and uClibc
- may not give us full pages for
- small allocations. So only zero
- out up to memsz or the end of the
- page, whichever comes first. */
-
- /* CPNT is the beginning of the memsz
- portion not backed by filesz. */
- cpnt = (char *) (status + size);
-
- /* MAP_SIZE is the address of the
- beginning of the next page. */
- map_size = (ppnt->p_vaddr + ppnt->p_filesz
- + ADDR_ALIGN) & PAGE_ALIGN;
-
-#ifndef MIN
-# define MIN(a,b) ((a) < (b) ? (a) : (b))
-#endif
- _dl_memset (cpnt, 0,
- MIN (map_size
- - (ppnt->p_vaddr
- + ppnt->p_filesz),
- ppnt->p_memsz
- - ppnt->p_filesz));
-
- if (map_size < ppnt->p_vaddr + ppnt->p_memsz
- && !piclib2map) {
- tryaddr = map_size + (char*)(piclib ? libaddr : 0);
- status = (char *) _dl_mmap(tryaddr,
- ppnt->p_vaddr + ppnt->p_memsz - map_size,
- LXFLAGS(ppnt->p_flags), flags | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
- if (_dl_mmap_check_error(status)
- || tryaddr != status)
- goto cant_map;
- }
+ status = map_writeable (infile, ppnt, piclib, flags, libaddr);
+ if (status == NULL)
+ goto cant_map1;
} else {
tryaddr = (piclib == 2 ? 0
: (char *) (ppnt->p_vaddr & PAGE_ALIGN)
@@ -664,11 +651,11 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
infile, ppnt->p_offset & OFFS_ALIGN);
if (_dl_mmap_check_error(status)
|| (tryaddr && tryaddr != status))
- goto cant_map;
- DL_INIT_LOADADDR_HDR
- (lib_loadaddr, status
- + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
+ goto cant_map1;
}
+ DL_INIT_LOADADDR_HDR(lib_loadaddr,
+ status + (ppnt->p_vaddr & ADDR_ALIGN),
+ ppnt);
/* if (libaddr == 0 && piclib) {
libaddr = (unsigned long) status;
@@ -677,7 +664,6 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
}
ppnt++;
}
- _dl_close(infile);
/* For a non-PIC library, the addresses are all absolute */
if (piclib) {
@@ -696,6 +682,7 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
_dl_dprintf(2, "%s: '%s' is missing a dynamic section\n",
_dl_progname, libname);
_dl_munmap(header, _dl_pagesize);
+ _dl_close(infile);
return NULL;
}
@@ -711,10 +698,23 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
for (i = 0; i < epnt->e_phnum; i++, ppnt++) {
if (ppnt->p_type == PT_LOAD && !(ppnt->p_flags & PF_W)) {
+#ifdef __ARCH_USE_MMU__
_dl_mprotect((void *) ((piclib ? libaddr : 0) +
(ppnt->p_vaddr & PAGE_ALIGN)),
(ppnt->p_vaddr & ADDR_ALIGN) + (unsigned long) ppnt->p_filesz,
PROT_READ | PROT_WRITE | PROT_EXEC);
+#else
+ void *new_addr;
+ new_addr = map_writeable (infile, ppnt, piclib, flags, libaddr);
+ if (!new_addr) {
+				_dl_dprintf(_dl_debug_file, "Can't modify %s's text section.\n",
+ libname);
+ _dl_exit(1);
+ }
+ DL_UPDATE_LOADADDR_HDR(lib_loadaddr,
+ new_addr + (ppnt->p_vaddr & ADDR_ALIGN),
+ ppnt);
+#endif
}
}
#else
@@ -725,6 +725,8 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
#endif
}
+ _dl_close(infile);
+
tpnt = _dl_add_elf_hash_table(libname, lib_loadaddr, dynamic_info,
dynamic_addr, 0);
tpnt->relro_addr = relro_addr;
diff --git a/ldso/ldso/frv/dl-inlines.h b/ldso/ldso/frv/dl-inlines.h
index 95233a7c0..0395a7e23 100644
--- a/ldso/ldso/frv/dl-inlines.h
+++ b/ldso/ldso/frv/dl-inlines.h
@@ -72,14 +72,39 @@ __dl_init_loadaddr_hdr (struct elf32_fdpic_loadaddr loadaddr, void *addr,
segdata->p_memsz = phdr->p_memsz;
#if defined (__SUPPORT_LD_DEBUG__)
- {
- extern char *_dl_debug;
- extern int _dl_debug_file;
- if (_dl_debug)
- _dl_dprintf(_dl_debug_file, "%i: mapped %x at %x, size %x\n",
- loadaddr.map->nsegs-1,
- segdata->p_vaddr, segdata->addr, segdata->p_memsz);
- }
+ if (_dl_debug)
+ _dl_dprintf(_dl_debug_file, "%i: mapped %x at %x, size %x\n",
+ loadaddr.map->nsegs-1,
+ segdata->p_vaddr, segdata->addr, segdata->p_memsz);
+#endif
+}
+
+/* Replace an existing entry in the load map. */
+static __always_inline void
+__dl_update_loadaddr_hdr (struct elf32_fdpic_loadaddr loadaddr, void *addr,
+ Elf32_Phdr *phdr)
+{
+ struct elf32_fdpic_loadseg *segdata;
+ void *oldaddr;
+ int i;
+
+ for (i = 0; i < loadaddr.map->nsegs; i++)
+ if (loadaddr.map->segs[i].p_vaddr == phdr->p_vaddr
+ && loadaddr.map->segs[i].p_memsz == phdr->p_memsz)
+ break;
+ if (i == loadaddr.map->nsegs)
+ _dl_exit (-1);
+
+ segdata = loadaddr.map->segs + i;
+ oldaddr = (void *)segdata->addr;
+ _dl_munmap (oldaddr, segdata->p_memsz);
+ segdata->addr = (Elf32_Addr) addr;
+
+#if defined (__SUPPORT_LD_DEBUG__)
+ if (_dl_debug)
+ _dl_dprintf(_dl_debug_file, "%i: changed mapping %x at %x (old %x), size %x\n",
+		    i,
+ segdata->p_vaddr, segdata->addr, oldaddr, segdata->p_memsz);
#endif
}
diff --git a/ldso/ldso/frv/dl-sysdep.h b/ldso/ldso/frv/dl-sysdep.h
index e9c847a69..206a66247 100644
--- a/ldso/ldso/frv/dl-sysdep.h
+++ b/ldso/ldso/frv/dl-sysdep.h
@@ -95,6 +95,8 @@ struct funcdesc_ht;
#define DL_INIT_LOADADDR_HDR(LOADADDR, ADDR, PHDR) \
(__dl_init_loadaddr_hdr ((LOADADDR), (ADDR), (PHDR), \
dl_init_loadaddr_load_count))
+#define DL_UPDATE_LOADADDR_HDR(LOADADDR, ADDR, PHDR) \
+ (__dl_update_loadaddr_hdr ((LOADADDR), (ADDR), (PHDR)))
#define DL_LOADADDR_UNMAP(LOADADDR, LEN) \
(__dl_loadaddr_unmap ((LOADADDR), (NULL)))
#define DL_LIB_UNMAP(LIB, LEN) \