-rw-r--r--   libc/stdlib/malloc/Makefile            |   2
-rw-r--r--   libc/stdlib/malloc/free.c              | 113
-rw-r--r--   libc/stdlib/malloc/heap.h              |  70
-rw-r--r--   libc/stdlib/malloc/heap_append_free.c  |  71
-rw-r--r--   libc/stdlib/malloc/heap_free.c         |   6
-rw-r--r--   libc/stdlib/malloc/malloc.c            | 110
-rw-r--r--   libc/stdlib/malloc/malloc.h            |  35
-rw-r--r--   libc/stdlib/malloc/realloc.c           |  17
8 files changed, 234 insertions, 190 deletions
diff --git a/libc/stdlib/malloc/Makefile b/libc/stdlib/malloc/Makefile
index 710f70297..6d1e89186 100644
--- a/libc/stdlib/malloc/Makefile
+++ b/libc/stdlib/malloc/Makefile
@@ -25,7 +25,7 @@ TOPDIR=../../../
include $(TOPDIR)Rules.mak
CSRC = malloc.o free.o realloc.o calloc.o heap_alloc.o \
- heap_alloc_at.o heap_free.o heap_append_free.o
+ heap_alloc_at.o heap_free.o
COBJS=$(patsubst %.c,%.o, $(CSRC))
OBJS=$(COBJS)
diff --git a/libc/stdlib/malloc/free.c b/libc/stdlib/malloc/free.c
index 4721d0269..41deed737 100644
--- a/libc/stdlib/malloc/free.c
+++ b/libc/stdlib/malloc/free.c
@@ -12,24 +12,119 @@
*/
#include <stdlib.h>
+#include <unistd.h>
#include <sys/mman.h>
#include "malloc.h"
#include "heap.h"
+/* Try to release the free-area FA in HEAP back to the system. */
+static void
+unmap_free_area (struct heap *heap, struct heap_free_area *fa)
+{
+ unsigned long start, end;
+#ifndef MALLOC_USE_SBRK
+ unsigned long unmap_start, unmap_end;
+#endif
+
+ end = (unsigned long)HEAP_FREE_AREA_END (fa);
+
+#ifdef MALLOC_USE_SBRK
+ /* When using sbrk, we only shrink the heap from the end. It would be
+ possible to allow _both_ -- shrinking via sbrk when possible, and
+ otherwise shrinking via munmap, but this results in holes in memory
+ that prevent the brk from ever growing back down; since we only ever
+ grow the heap via sbrk, this tends to produce a continuously growing
+ brk (though the actual memory is unmapped), which could eventually run
+ out of address space. Note that `sbrk(0)' shouldn't normally do a
+ system call, so this test is reasonably cheap. */
+ if ((void *)end != sbrk (0))
+ {
+ MALLOC_DEBUG (" not unmapping: 0x%lx - 0x%lx (%d bytes)\n",
+ (unsigned long)HEAP_FREE_AREA_START (fa),
+ (unsigned long)HEAP_FREE_AREA_END (fa),
+ fa->size);
+ return;
+ }
+#endif
+
+ start = (unsigned long)HEAP_FREE_AREA_START (fa);
+
+ MALLOC_DEBUG (" unmapping: 0x%lx - 0x%lx (%ld bytes)\n",
+ start, end, end - start);
+
+ /* Remove FA from the heap. */
+ __heap_unlink_free_area (heap, fa);
+
+ if (!fa->next && !fa->prev)
+ /* We want to keep the heap from losing all its memory, so reserve a bit.
+ This test is only a heuristic -- the existence of another free area,
+ even if it's smaller than MALLOC_MIN_SIZE, will cause us not to
+ reserve anything. */
+ {
+ /* Put the reserved memory back in the heap; we assume that
+ MALLOC_UNMAP_THRESHOLD is greater than MALLOC_MIN_SIZE, so we use
+ the latter unconditionally here. */
+ __heap_free (heap, (void *)start, MALLOC_MIN_SIZE);
+ start += MALLOC_MIN_SIZE;
+ }
+
+#ifdef MALLOC_USE_SBRK
+
+ sbrk (start - end);
+
+#else /* !MALLOC_USE_SBRK */
+
+ /* START and END may not be page-aligned, so we have to page-align them, and
+ return any left-over bits on the end to the heap. */
+ unmap_start = MALLOC_ROUND_UP_TO_PAGE_SIZE (start);
+ unmap_end = MALLOC_ROUND_DOWN_TO_PAGE_SIZE (end);
+
+ /* We have to be careful that any left-over bits are large enough to
+ return. Note that we _don't check_ to make sure there's room to
+ grow/shrink the start/end by another page, we just assume that the
+ unmap threshold is high enough so that this is always safe (i.e., it
+ should probably be at least 3 pages). */
+ if (unmap_start > start)
+ {
+ if (unmap_start - start < HEAP_MIN_FREE_AREA_SIZE)
+ unmap_start += MALLOC_PAGE_SIZE;
+ __heap_free (heap, (void *)start, unmap_start - start);
+ }
+ if (end > unmap_end)
+ {
+ if (end - unmap_end < HEAP_MIN_FREE_AREA_SIZE)
+ unmap_end -= MALLOC_PAGE_SIZE;
+ __heap_free (heap, (void *)unmap_end, end - unmap_end);
+ }
+
+ if (unmap_end > unmap_start)
+ munmap ((void *)unmap_start, unmap_end - unmap_start);
+
+#endif /* MALLOC_USE_SBRK */
+}
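
A worked example makes the page-trimming above easier to follow. The standalone sketch below is not part of the patch: it assumes a 4096-byte page, re-derives the rounding locally, and skips the HEAP_MIN_FREE_AREA_SIZE adjustment that the real code applies when a leftover fragment would be too small to hand back.

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define ROUND_DOWN(x) ((x) & ~(PAGE_SIZE - 1))
#define ROUND_UP(x)   ROUND_DOWN ((x) + PAGE_SIZE - 1)

int main (void)
{
  /* Hypothetical bounds of a free area that crossed the unmap threshold. */
  unsigned long start = 0x80001010, end = 0x8000a008;

  unsigned long unmap_start = ROUND_UP (start);   /* 0x80002000 */
  unsigned long unmap_end   = ROUND_DOWN (end);   /* 0x8000a000 */

  /* The unaligned head and tail go back into the heap via __heap_free;
     only the page-aligned middle is actually munmapped. */
  printf ("head kept: %lu bytes, tail kept: %lu bytes\n",
          unmap_start - start, end - unmap_end);
  printf ("munmap %lu bytes at 0x%lx\n",
          unmap_end - unmap_start, unmap_start);
  return 0;
}
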
+
+
void free (void *mem)
{
- size_t size;
+ if (mem)
+ {
+ size_t size;
+ struct heap_free_area *fa;
+
+ mem -= MALLOC_ALIGNMENT;
+ size = *(size_t *)mem;
- mem = (size_t *)mem - 1;
- size = *(size_t *)mem;
+ MALLOC_DEBUG ("free: 0x%lx (base = 0x%lx, total_size = %d)\n",
+ (long)mem + MALLOC_ALIGNMENT, (long)mem, size);
- MALLOC_DEBUG ("free: 0x%lx (base = 0x%lx, total_size = %d)\n",
- (long)mem + sizeof (size_t), (long)mem, size);
+ fa = __heap_free (&__malloc_heap, mem, size);
- if (size >= MALLOC_MMAP_THRESHOLD)
- munmap (mem, size);
- else
- __heap_free (&__malloc_heap, mem, size);
+ /* Now we check to see if FA has grown big enough that it should be
+ unmapped. */
+ if (HEAP_FREE_AREA_SIZE (fa) >= MALLOC_UNMAP_THRESHOLD)
+ /* Get rid of it. */
+ unmap_free_area (&__malloc_heap, fa);
+ }
}
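
For reference, here is a minimal sketch of the block layout that malloc and free now agree on: the total block size sits in the first word, and the caller receives a pointer MALLOC_ALIGNMENT bytes past the base. The MALLOC_ALIGNMENT value and the helper below are assumptions for illustration, not part of the patch.

#include <stddef.h>

#define MALLOC_ALIGNMENT (sizeof (double))   /* assumed; matches HEAP_GRANULARITY */

/*  base                      base + MALLOC_ALIGNMENT
 *   |                         |
 *   v                         v
 *   +----------+--------------+---------------------------+
 *   | size_t   | (padding)    | memory returned to caller |
 *   +----------+--------------+---------------------------+
 *   `------------------ size bytes total -----------------'
 */

/* Recover the total block size from a pointer returned by malloc. */
static size_t block_total_size (void *mem)
{
  return *(size_t *)((char *)mem - MALLOC_ALIGNMENT);
}
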
diff --git a/libc/stdlib/malloc/heap.h b/libc/stdlib/malloc/heap.h
index 0d6465050..589bf42b0 100644
--- a/libc/stdlib/malloc/heap.h
+++ b/libc/stdlib/malloc/heap.h
@@ -29,10 +29,9 @@ typedef int heap_mutex_t;
#endif
-
-/* The unit in which allocation is done, due to alignment constraints, etc.
- All allocation requests are rounded up to a multiple of this size.
- Must be a power of 2. */
+/* The heap allocates in multiples of, and aligned to, HEAP_GRANULARITY.
+ HEAP_GRANULARITY must be a power of 2. Malloc depends on this being the
+ same as MALLOC_ALIGNMENT. */
#define HEAP_GRANULARITY (sizeof (double))
@@ -40,10 +39,11 @@ typedef int heap_mutex_t;
of memory can be allocated. */
struct heap
{
+ /* A list of memory in the heap available for allocation. */
struct heap_free_area *free_areas;
+
heap_mutex_t lock;
};
-
#define HEAP_INIT { 0, HEAP_MUTEX_INIT }
@@ -61,6 +61,8 @@ struct heap_free_area
/* Return the address of the beginning of the free area FA. FA is
evaluated multiple times. */
#define HEAP_FREE_AREA_START(fa) ((void *)((char *)(fa + 1) - (fa)->size))
+/* Return the size of the free area FA. */
+#define HEAP_FREE_AREA_SIZE(fa) ((fa)->size)
/* Rounds SZ up to be a multiple of HEAP_GRANULARITY. */
@@ -74,6 +76,8 @@ struct heap_free_area
(sizeof (struct heap_free_area) + HEAP_ADJUST_SIZE (1))
+/* Change this to `#if 1' to cause the heap routines to emit debugging info
+ to stderr. */
#if 0
#include <stdio.h>
static void HEAP_DEBUG (struct heap *heap, const char *str)
@@ -81,18 +85,24 @@ static void HEAP_DEBUG (struct heap *heap, const char *str)
static int recursed = 0;
if (! recursed)
{
- struct heap_free_area *fa;
+ struct heap_free_area *fa, *prev;
recursed = 1;
fprintf (stderr, " %s: heap @0x%lx:\n", str, (long)heap);
- for (fa = heap->free_areas; fa; fa = fa->next)
- fprintf (stderr,
- " 0x%lx: 0x%lx - 0x%lx (%d)\tN=0x%lx, P=0x%lx\n",
- (long)fa,
- (long)HEAP_FREE_AREA_START (fa),
- (long)HEAP_FREE_AREA_END (fa),
- fa->size,
- (long)fa->prev,
- (long)fa->next);
+ for (prev = 0, fa = heap->free_areas; fa; prev = fa, fa = fa->next)
+ {
+ fprintf (stderr,
+ " 0x%lx: 0x%lx - 0x%lx (%d)\tP=0x%lx, N=0x%lx\n",
+ (long)fa,
+ (long)HEAP_FREE_AREA_START (fa),
+ (long)HEAP_FREE_AREA_END (fa),
+ fa->size,
+ (long)fa->prev,
+ (long)fa->next);
+ if (fa->prev != prev)
+ fprintf (stderr,
+ " PREV POINTER CORRUPTED!!!! P=0x%lx should be 0x%lx\n",
+ (long)fa->prev, (long)prev);
+ }
recursed = 0;
}
}
@@ -101,6 +111,18 @@ static void HEAP_DEBUG (struct heap *heap, const char *str)
#endif
+/* Remove the free-area FA from HEAP. */
+extern inline void
+__heap_unlink_free_area (struct heap *heap, struct heap_free_area *fa)
+{
+ if (fa->next)
+ fa->next->prev = fa->prev;
+ if (fa->prev)
+ fa->prev->next = fa->next;
+ else
+ heap->free_areas = fa->next;
+}
+
/* Allocate SIZE bytes from the front of the free-area FA in HEAP, and
return the amount actually allocated (which may be more than SIZE). */
extern inline size_t
@@ -113,12 +135,7 @@ __heap_free_area_alloc (struct heap *heap,
/* There's not enough room left over in FA after allocating the block, so
just use the whole thing, removing it from the list of free areas. */
{
- if (fa->next)
- fa->next->prev = fa->prev;
- if (fa->prev)
- fa->prev->next = fa->next;
- else
- heap->free_areas = fa->next;
+ __heap_unlink_free_area (heap, fa);
/* Remember that we've alloced the whole area. */
size = fa_size;
}
@@ -139,10 +156,7 @@ extern void *__heap_alloc (struct heap *heap, size_t *size);
allocated, or 0 if we failed. */
extern size_t __heap_alloc_at (struct heap *heap, void *mem, size_t size);
-/* Return the memory area MEM of size SIZE to HEAP. */
-extern void __heap_free (struct heap *heap, void *mem, size_t size);
-
-/* If the memory area MEM, of size SIZE, immediately follows an existing
- free-area in HEAP, use it to extend that free-area, and return true;
- otherwise return false. */
-extern int __heap_append_free (struct heap *heap, void *mem, size_t size);
+/* Return the memory area MEM of size SIZE to HEAP.
+ Returns the heap free area into which the memory was placed. */
+extern struct heap_free_area *__heap_free (struct heap *heap,
+ void *mem, size_t size);
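
A sketch of the free-area layout these macros assume: the struct heap_free_area descriptor lives at the end of the region it describes, and fa->size counts the whole region including the descriptor. The field order shown here is illustrative; only the members referenced by the macros are assumed.

#include <stddef.h>

struct heap_free_area
{
  size_t size;                        /* total bytes, including this struct */
  struct heap_free_area *next, *prev; /* doubly-linked free list */
};

/*   HEAP_FREE_AREA_START(fa)                    HEAP_FREE_AREA_END(fa)
 *   = (char *)(fa + 1) - fa->size               = (void *)(fa + 1)
 *    |                                           |
 *    v                                           v
 *    +------------------------------+------------+
 *    |        free space ...        | descriptor |
 *    +------------------------------+------------+
 *    `-------------- fa->size bytes -------------'
 */
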
diff --git a/libc/stdlib/malloc/heap_append_free.c b/libc/stdlib/malloc/heap_append_free.c
deleted file mode 100644
index d67f46495..000000000
--- a/libc/stdlib/malloc/heap_append_free.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * libc/stdlib/malloc/heap_append_free.c -- append memory to a heap free area
- *
- * Copyright (C) 2002 NEC Corporation
- * Copyright (C) 2002 Miles Bader <miles@gnu.org>
- *
- * This file is subject to the terms and conditions of the GNU Lesser
- * General Public License. See the file COPYING.LIB in the main
- * directory of this archive for more details.
- *
- * Written by Miles Bader <miles@gnu.org>
- */
-
-#include <stdlib.h>
-
-#include "heap.h"
-
-
-/* If the block MEM, of size SIZE, immediately follows an existing free-area
- in HEAP, use it to extend that free-area, and return true; otherwise return
- false. */
-int
-__heap_append_free (struct heap *heap, void *mem, size_t size)
-{
- int success = 0;
- struct heap_free_area *fa;
-
- __heap_lock (heap);
-
- HEAP_DEBUG (heap, "before __heap_append_free");
-
- /* Find an adjacent free-list entry. */
- for (fa = heap->free_areas; fa; fa = fa->next)
- if (HEAP_FREE_AREA_END (fa) == mem)
- /* MEM follows FA, extend FA to include it. Since the descriptor for FA
- is located at the end, we must actually write a new descriptor. Note
- that we _don't_ handle the case where the extended FA can be merged
- with a following free area; this is because this function is
- generally only used in cases were we believe that usually won't
- happen (it doesn't cause any incorrectness, and the two blocks can be
- merged by __heap_free later). */
- {
- struct heap_free_area *next_fa = fa->next;
- struct heap_free_area *prev_fa = fa->prev;
- size_t fa_size = fa->size;
- struct heap_free_area *new_fa =
- (struct heap_free_area *)((char *)fa + size);
-
- /* Update surrounding free-areas to point to FA's new address. */
- if (prev_fa)
- prev_fa->next = new_fa;
- else
- heap->free_areas = new_fa;
- if (next_fa)
- next_fa->prev = new_fa;
-
- /* Fill in the moved descriptor. */
- new_fa->prev = prev_fa;
- new_fa->next = next_fa;
- new_fa->size = fa_size + size;
-
- success = 1;
- break;
- }
-
- HEAP_DEBUG (heap, "after __heap_append_free");
-
- __heap_unlock (heap);
-
- return success;
-}
diff --git a/libc/stdlib/malloc/heap_free.c b/libc/stdlib/malloc/heap_free.c
index d8eaf7e66..ac7e00be3 100644
--- a/libc/stdlib/malloc/heap_free.c
+++ b/libc/stdlib/malloc/heap_free.c
@@ -17,7 +17,7 @@
/* Return the memory area MEM of size SIZE to HEAP. */
-void
+struct heap_free_area *
__heap_free (struct heap *heap, void *mem, size_t size)
{
struct heap_free_area *prev_fa, *fa, *new_fa;
@@ -120,8 +120,12 @@ __heap_free (struct heap *heap, void *mem, size_t size)
if (fa)
fa->prev = new_fa;
+ fa = new_fa;
+
done:
HEAP_DEBUG (heap, "after __heap_free");
__heap_unlock (heap);
+
+ return fa;
}
diff --git a/libc/stdlib/malloc/malloc.c b/libc/stdlib/malloc/malloc.c
index 32d56c153..0e84bf646 100644
--- a/libc/stdlib/malloc/malloc.c
+++ b/libc/stdlib/malloc/malloc.c
@@ -12,25 +12,14 @@
*/
#include <stdlib.h>
+#include <unistd.h>
#include <sys/mman.h>
#include "malloc.h"
#include "heap.h"
-/* When we give memory to the heap, start this many bytes after the
- beginning of the mmaped block. This is because we must ensure that
- malloc return values are aligned to MALLOC_ALIGNMENT, but since we need
- to use one word _before_ the beginning of that, we actually want the heap
- to return values that are MALLOC_ALIGNMENT aligned - sizeof (size_t).
- Since the heap always allocates in multiples of HEAP_GRANULARITY, we can
- do this by (1) ensuring that HEAP_GRANULARITY is a multiple of
- MALLOC_ALIGNMENT, and (2) making sure that the heap's free areas start
- sizeof(size_t) bytes before our required alignment. */
-#define MALLOC_HEAP_BLOCK_SHIM (MALLOC_ALIGNMENT - sizeof (size_t))
-
-
-/* The heap used for small allocations. */
+/* The malloc heap. */
struct heap __malloc_heap = HEAP_INIT;
@@ -40,56 +29,57 @@ void *malloc (size_t size)
MALLOC_DEBUG ("malloc: %d bytes\n", size);
- /* Include an extra word to record the size of the allocated block. */
- size += sizeof (size_t);
-
- if (size >= MALLOC_MMAP_THRESHOLD)
- /* Use mmap for large allocations. */
- {
- /* Make sure we request enough memory to align the result correctly,
- and that SIZE reflects that mmap hands back whole pages. */
- size += MALLOC_ROUND_UP_TO_PAGE_SIZE (MALLOC_ALIGNMENT - sizeof(size_t));
+ /* Include extra space to record the size of the allocated block. */
+ size += MALLOC_ROUND_UP (sizeof (size_t), MALLOC_ALIGNMENT);
- mem = mmap (0, size, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS, 0, 0);
- if (mem == MAP_FAILED)
- return 0;
- }
- else
- /* Use the heap for small allocations. */
+ mem = __heap_alloc (&__malloc_heap, &size);
+ if (! mem)
+ /* We couldn't allocate from the heap, so get some more memory
+ from the system, add it to the heap, and try again. */
{
- mem = __heap_alloc (&__malloc_heap, &size);
-
- if (! mem)
- /* We couldn't allocate from the heap, so get some more memory
- from the system, add it to the heap, and try again. */
+ /* If we're trying to allocate a block bigger than the default
+ MALLOC_HEAP_EXTEND_SIZE, make sure we get enough to hold it. */
+ size_t block_size
+ = (size < MALLOC_HEAP_EXTEND_SIZE
+ ? MALLOC_HEAP_EXTEND_SIZE
+ : MALLOC_ROUND_UP_TO_PAGE_SIZE (size));
+ /* Allocate the new heap block. */
+#ifdef MALLOC_USE_SBRK
+ /* Use sbrk if we can, as it's faster than mmap, and guarantees
+ contiguous allocation. */
+ void *block = sbrk (block_size);
+#else
+ /* Otherwise, use mmap. */
+ void *block = mmap (0, block_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, 0, 0);
+#endif
+
+ if (block != (void *)-1)
{
- /* If we're trying to allocate a block bigger than the default
- MALLOC_HEAP_EXTEND_SIZE, make sure we get enough to hold it. */
- size_t block_size = (size < MALLOC_HEAP_EXTEND_SIZE
- ? MALLOC_HEAP_EXTEND_SIZE
- : MALLOC_ROUND_UP_TO_PAGE_SIZE (size));
- /* Allocate the new heap block. */
- void *block = mmap (0, block_size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS, 0, 0);
-
- if (block != MAP_FAILED)
+#ifdef MALLOC_USE_SBRK
+ /* Because sbrk can return results of arbitrary
+ alignment, align the result to a MALLOC_ALIGNMENT boundary. */
+ long aligned_block = MALLOC_ROUND_UP ((long)block, MALLOC_ALIGNMENT);
+ if (block != (void *)aligned_block)
+ /* Have to adjust. We should only have to actually do this
+ the first time (after which we will have aligned the brk
+ correctly). */
{
- /* Put BLOCK into the heap. We first try to append BLOCK to
- an existing free area, which is more efficient because it
- doesn't require using a `shim' at the beginning (which
- would prevent merging free-areas); since mmap often returns
- contiguous areas, this is worth it. */
- if (! __heap_append_free (&__malloc_heap, block, block_size))
- /* Couldn't append, just add BLOCK as a new free-area. */
- __heap_free (&__malloc_heap,
- block + MALLOC_HEAP_BLOCK_SHIM,
- block_size - MALLOC_HEAP_BLOCK_SHIM);
-
- /* Try again to allocate. */
- mem = __heap_alloc (&__malloc_heap, &size);
+ /* Move the brk to reflect the alignment; our next allocation
+ should start on exactly the right alignment. */
+ sbrk (aligned_block - (long)block);
+ block = (void *)aligned_block;
}
+#endif /* MALLOC_USE_SBRK */
+
+ MALLOC_DEBUG (" adding memory: 0x%lx - 0x%lx (%d bytes)\n",
+ (long)block, (long)block + block_size, block_size);
+
+ /* Put BLOCK into the heap. */
+ __heap_free (&__malloc_heap, block, block_size);
+
+ /* Try again to allocate. */
+ mem = __heap_alloc (&__malloc_heap, &size);
}
}
@@ -97,10 +87,10 @@ void *malloc (size_t size)
/* Record the size of this block just before the returned address. */
{
*(size_t *)mem = size;
- mem = (size_t *)mem + 1;
+ mem += MALLOC_ALIGNMENT;
MALLOC_DEBUG (" malloc: returning 0x%lx (base:0x%lx, total_size:%d)\n",
- (long)mem, (long)mem - sizeof (size_t), size);
+ (long)mem, (long)mem - MALLOC_ALIGNMENT, size);
}
return mem;
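
Read in isolation, the sbrk alignment fix-up amounts to the sketch below. It assumes MALLOC_ALIGNMENT is sizeof (double) and that nothing else moves the brk between the two sbrk calls; the helper name is illustrative and not part of the patch.

#include <unistd.h>
#include <stddef.h>

#define MALLOC_ALIGNMENT  (sizeof (double))             /* assumed */
#define ROUND_UP(x, a)    (((x) + (a) - 1) & ~((a) - 1))

static void *sbrk_aligned_block (size_t block_size)
{
  void *block = sbrk (block_size);
  if (block == (void *)-1)
    return 0;

  long aligned = ROUND_UP ((long)block, MALLOC_ALIGNMENT);
  if ((void *)aligned != block)
    /* Swallow the gap so this block -- and every later sbrk -- starts
       on a MALLOC_ALIGNMENT boundary. */
    sbrk (aligned - (long)block);

  return (void *)aligned;
}
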
diff --git a/libc/stdlib/malloc/malloc.h b/libc/stdlib/malloc/malloc.h
index 187477a67..0cab1dba2 100644
--- a/libc/stdlib/malloc/malloc.h
+++ b/libc/stdlib/malloc/malloc.h
@@ -23,11 +23,26 @@
satisfy a particularly big request). */
#define MALLOC_HEAP_EXTEND_SIZE MALLOC_PAGE_SIZE
-/* The threshold above which blocks are allocated/freed with mmap/munmap,
- rather than using the heap. */
-#define MALLOC_MMAP_THRESHOLD (8*MALLOC_PAGE_SIZE)
+/* When a heap free-area grows above this size, try to unmap it, releasing
+ the memory back to the system. */
+#define MALLOC_UNMAP_THRESHOLD (8*MALLOC_PAGE_SIZE)
+/* When unmapping a free-area, retain this many bytes if it's the only one,
+ to avoid completely emptying the heap. This is only a heuristic -- the
+ existence of another free area, even if it's smaller than
+ MALLOC_MIN_SIZE, will cause us not to reserve anything. */
+#define MALLOC_MIN_SIZE (2*MALLOC_PAGE_SIZE)
+/* For systems with an MMU, use sbrk to map/unmap memory for the malloc
+ heap, instead of mmap/munmap. This is a tradeoff -- sbrk is faster than
+ mmap/munmap, and guarantees contiguous allocation, but is also less
+ flexible, and causes the heap to only be shrinkable from the end. */
+#ifdef __UCLIBC_HAS_MMU__
+#define MALLOC_USE_SBRK
+#endif
+
+
+/* Change this to `#if 1' to cause malloc to emit debugging info to stderr. */
#if 0
#include <stdio.h>
#define MALLOC_DEBUG(fmt, args...) fprintf (stderr, fmt , ##args)
@@ -36,10 +51,20 @@
#endif
+/* Return SZ rounded down to a multiple of POWER_OF_2_SIZE (which must be a power of 2). */
+#define MALLOC_ROUND_DOWN(sz, power_of_2_size) \
+ ((sz) & ~(power_of_2_size - 1))
+/* Return SZ rounded up to a multiple of POWER_OF_2_SIZE (which must be a power of 2). */
+#define MALLOC_ROUND_UP(sz, power_of_2_size) \
+ MALLOC_ROUND_DOWN ((sz) + (power_of_2_size - 1), (power_of_2_size))
+
+/* Return SZ rounded down to a multiple of MALLOC_PAGE_SIZE. */
+#define MALLOC_ROUND_DOWN_TO_PAGE_SIZE(sz) \
+ MALLOC_ROUND_DOWN (sz, MALLOC_PAGE_SIZE)
/* Return SZ rounded up to a multiple of MALLOC_PAGE_SIZE. */
#define MALLOC_ROUND_UP_TO_PAGE_SIZE(sz) \
- (((sz) + (MALLOC_PAGE_SIZE - 1)) & ~(MALLOC_PAGE_SIZE - 1))
+ MALLOC_ROUND_UP (sz, MALLOC_PAGE_SIZE)
-/* The heap used for small allocations. */
+/* The malloc heap. */
extern struct heap __malloc_heap;
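
The rewritten rounding macros are easy to sanity-check numerically. The snippet below is not part of the patch; it assumes a 4096-byte MALLOC_PAGE_SIZE and repeats the macro definitions so it stands alone.

#include <assert.h>

#define MALLOC_PAGE_SIZE 4096                           /* assumed */
#define MALLOC_ROUND_DOWN(sz, p2)  ((sz) & ~((p2) - 1))
#define MALLOC_ROUND_UP(sz, p2)    MALLOC_ROUND_DOWN ((sz) + ((p2) - 1), (p2))

int main (void)
{
  assert (MALLOC_ROUND_DOWN (5000, MALLOC_PAGE_SIZE) == 4096);
  assert (MALLOC_ROUND_UP   (5000, MALLOC_PAGE_SIZE) == 8192);
  assert (MALLOC_ROUND_UP   (4096, MALLOC_PAGE_SIZE) == 4096);
  assert (MALLOC_ROUND_DOWN (4095, MALLOC_PAGE_SIZE) == 0);
  return 0;
}
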
diff --git a/libc/stdlib/malloc/realloc.c b/libc/stdlib/malloc/realloc.c
index f18ed9eb6..091b3392b 100644
--- a/libc/stdlib/malloc/realloc.c
+++ b/libc/stdlib/malloc/realloc.c
@@ -25,7 +25,7 @@ void *realloc (void *mem, size_t new_size)
return malloc (new_size);
else
{
- void *base_mem = (size_t *)mem - 1;
+ void *base_mem = mem - MALLOC_ALIGNMENT;
size_t size = *(size_t *)base_mem;
MALLOC_DEBUG ("realloc: 0x%lx, %d (base = 0x%lx, total_size = %d)\n",
@@ -39,20 +39,7 @@ void *realloc (void *mem, size_t new_size)
size_t ext_size = new_size - size;
void *ext_addr = (char *)base_mem + size;
- if (size >= MALLOC_MMAP_THRESHOLD)
- /* Try to extend this block in place using mmap. */
- {
- ext_size += MALLOC_ROUND_UP_TO_PAGE_SIZE (ext_size);
-
- new_mem = mmap (ext_addr, ext_size, PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_SHARED | MAP_ANONYMOUS, 0, 0);
- if (new_mem == MAP_FAILED)
- /* Can't do it. */
- ext_size = 0;
- }
- else
- ext_size = __heap_alloc_at (&__malloc_heap, ext_addr, ext_size);
-
+ ext_size = __heap_alloc_at (&__malloc_heap, ext_addr, ext_size);
if (! ext_size)
/* Our attempts to extend MEM in place failed, just
allocate-and-copy. */