| author | Rob Landley <rob@landley.net> | 2008-10-28 06:48:06 +0000 |
|---|---|---|
| committer | Rob Landley <rob@landley.net> | 2008-10-28 06:48:06 +0000 |
| commit | 7d602faf7652cbd8358ff90a9eaa53ac5230dabe (patch) | |
| tree | 6739a0f15b6a24fc94fdf2b6a16a2807a405a976 /libc/stdlib/malloc | |
| parent | 346792d2a1494d3451837c205343c2ecbf5ee810 (diff) | |
Finally fix the MALLOC=y and MALLOC_SIMPLE=y breakage from svn 23660. (I found it, this is Bernhard's patch to fix it. Tested and it Works For Me (tm)).
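The substance of the fix is visible in the hunks below: the heap helpers used to take the free-list head as `struct heap_free_area *heap` and assign to it (e.g. `heap = fa->next;`), which only updated the helper's local copy, so the caller's global head (`__malloc_heap`, `__malloc_mmb_heap`) silently went stale. The patch threads a `struct heap_free_area **heap` through every helper and writes `*heap = ...` instead. As a minimal, self-contained sketch of that bug class and its fix (hypothetical names, not the uClibc code):

```c
/* Sketch of the lost-update bug fixed by this patch: a list head passed
   by value cannot be replaced by the callee.  Names are hypothetical. */
#include <stdio.h>
#include <stddef.h>

struct area { struct area *next; };

/* Broken: 'head' is a local copy; assigning to it never reaches the
   caller's variable. */
static void delete_first_by_value (struct area *head)
{
  if (head)
    head = head->next;           /* updates only the local copy */
}

/* Fixed: take a pointer to the head, as the patch does with
   'struct heap_free_area **heap', and write through it. */
static void delete_first_by_ref (struct area **head)
{
  if (*head)
    *head = (*head)->next;       /* really advances the caller's head */
}

int main (void)
{
  struct area b = { NULL };
  struct area a = { &b };
  struct area *heap = &a;

  delete_first_by_value (heap);
  printf ("by value: head %s\n", heap == &a ? "unchanged (the bug)" : "advanced");

  delete_first_by_ref (&heap);
  printf ("by ref:   head %s\n", heap == &b ? "advanced (the fix)" : "unchanged");
  return 0;
}
```

Any function that may replace the head of a linked structure needs this extra level of indirection (or must return the new head); the diff applies the same change uniformly to every helper declared in heap.h.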
Diffstat (limited to 'libc/stdlib/malloc')
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | libc/stdlib/malloc/free.c | 41 |
| -rw-r--r-- | libc/stdlib/malloc/heap.h | 30 |
| -rw-r--r-- | libc/stdlib/malloc/heap_alloc.c | 8 |
| -rw-r--r-- | libc/stdlib/malloc/heap_alloc_at.c | 8 |
| -rw-r--r-- | libc/stdlib/malloc/heap_free.c | 8 |
| -rw-r--r-- | libc/stdlib/malloc/malloc.c | 33 |
| -rw-r--r-- | libc/stdlib/malloc/memalign.c | 2 |
| -rw-r--r-- | libc/stdlib/malloc/realloc.c | 12 |
8 files changed, 68 insertions, 74 deletions
```diff
diff --git a/libc/stdlib/malloc/free.c b/libc/stdlib/malloc/free.c
index 4713b0624..91552919e 100644
--- a/libc/stdlib/malloc/free.c
+++ b/libc/stdlib/malloc/free.c
@@ -21,12 +21,18 @@ libc_hidden_proto(sbrk)
 #include "malloc.h"
 #include "heap.h"
 
-static void
 #ifdef HEAP_USE_LOCKING
-free_to_heap (void *mem, struct heap_free_area *heap, malloc_mutex_t *heap_lock)
+#define free_to_heap(mem, heap, lck) __free_to_heap(mem, heap, lck)
 #else
-free_to_heap (void *mem, struct heap_free_area *heap)
+#define free_to_heap(mem, heap, lck) __free_to_heap(mem, heap)
+#endif
+
+static void
+__free_to_heap (void *mem, struct heap_free_area **heap
+#ifdef HEAP_USE_LOCKING
+	, malloc_mutex_t *heap_lock
 #endif
+	)
 {
   size_t size;
   struct heap_free_area *fa;
@@ -43,7 +49,7 @@ free_to_heap (void *mem, struct heap_free_area *heap)
   size = MALLOC_SIZE (mem);
   mem = MALLOC_BASE (mem);
 
-  __heap_do_lock (heap_lock);
+  __heap_lock (heap_lock);
 
   /* Put MEM back in the heap, and get the free-area it was placed in.  */
   fa = __heap_free (heap, mem, size);
@@ -52,7 +58,7 @@ free_to_heap (void *mem, struct heap_free_area *heap)
      unmapped.  */
   if (HEAP_FREE_AREA_SIZE (fa) < MALLOC_UNMAP_THRESHOLD)
     /* Nope, nothing left to do, just release the lock.  */
-    __heap_do_unlock (heap_lock);
+    __heap_unlock (heap_lock);
   else
     /* Yup, try to unmap FA.  */
     {
@@ -85,7 +91,7 @@ free_to_heap (void *mem, struct heap_free_area *heap)
	    MALLOC_DEBUG (-1, "not unmapping: 0x%lx - 0x%lx (%ld bytes)",
			  start, end, end - start);
	    __malloc_unlock_sbrk ();
-	    __heap_do_unlock (heap_lock);
+	    __heap_unlock (heap_lock);
	    return;
	  }
 #endif
@@ -102,7 +108,7 @@ free_to_heap (void *mem, struct heap_free_area *heap)
	   another free area, even if it's smaller than MALLOC_MIN_SIZE,
	   will cause us not to reserve anything.  */
	{
-	  /* Put the reserved memory back in the heap; we asssume that
+	  /* Put the reserved memory back in the heap; we assume that
	     MALLOC_UNMAP_THRESHOLD is greater than MALLOC_MIN_SIZE, so we
	     use the latter unconditionally here.  */
	  __heap_free (heap, (void *)start, MALLOC_MIN_SIZE);
@@ -112,7 +118,7 @@ free_to_heap (void *mem, struct heap_free_area *heap)
 
 #ifdef MALLOC_USE_SBRK
       /* Release the heap lock; we're still holding the sbrk lock.  */
-      __heap_do_unlock (heap_lock);
+      __heap_unlock (heap_lock);
       /* Lower the brk.  */
       sbrk (start - end);
       /* Release the sbrk lock too; now we hold no locks.  */
@@ -176,20 +182,15 @@ free_to_heap (void *mem, struct heap_free_area *heap)
	      /* We have to unlock the heap before we recurse to free the mmb
		 descriptor, because we might be unmapping from the mmb
		 heap.  */
-	      __heap_do_unlock (heap_lock);
+	      __heap_unlock (heap_lock);
 
-#ifdef HEAP_USE_LOCKING
	      /* Release the descriptor block we used.  */
	      free_to_heap (mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
-#else
-	      /* Release the descriptor block we used.  */
-	      free_to_heap (mmb, &__malloc_mmb_heap);
-#endif
 
	      /* Do the actual munmap.  */
	      munmap ((void *)mmb_start, mmb_end - mmb_start);
 
-	      __heap_do_lock (heap_lock);
+	      __heap_lock (heap_lock);
 
 # ifdef __UCLIBC_HAS_THREADS__
	      /* In a multi-threaded program, it's possible that PREV_MMB has
@@ -222,7 +223,7 @@ free_to_heap (void *mem, struct heap_free_area *heap)
	}
 
       /* Finally release the lock for good.  */
-      __heap_do_unlock (heap_lock);
+      __heap_unlock (heap_lock);
 
       MALLOC_MMB_DEBUG_INDENT (-1);
 
@@ -252,7 +253,7 @@ free_to_heap (void *mem, struct heap_free_area *heap)
	}
 
       /* Release the heap lock before we do the system call.  */
-      __heap_do_unlock (heap_lock);
+      __heap_unlock (heap_lock);
 
       if (unmap_end > unmap_start)
	/* Finally, actually unmap the memory.  */
@@ -269,9 +270,5 @@ free_to_heap (void *mem, struct heap_free_area *heap)
 void
 free (void *mem)
 {
-#ifdef HEAP_USE_LOCKING
-  free_to_heap (mem, __malloc_heap, &__malloc_heap_lock);
-#else
-  free_to_heap (mem, __malloc_heap);
-#endif
+  free_to_heap (mem, &__malloc_heap, &__malloc_heap_lock);
 }
diff --git a/libc/stdlib/malloc/heap.h b/libc/stdlib/malloc/heap.h
index 2a686b601..c0c5df821 100644
--- a/libc/stdlib/malloc/heap.h
+++ b/libc/stdlib/malloc/heap.h
@@ -19,11 +19,11 @@
 # include <pthread.h>
 # include <bits/uClibc_pthread.h>
 # define HEAP_USE_LOCKING
-# define __heap_do_lock(heap_lock) __pthread_mutex_lock (heap_lock)
-# define __heap_do_unlock(heap_lock) __pthread_mutex_unlock (heap_lock)
+# define __heap_lock(heap_lock) __pthread_mutex_lock (heap_lock)
+# define __heap_unlock(heap_lock) __pthread_mutex_unlock (heap_lock)
 #else
-# define __heap_do_lock(heap_lock)
-# define __heap_do_unlock(heap_lock)
+# define __heap_lock(heap_lock)
+# define __heap_unlock(heap_lock)
 #endif
 
 
@@ -123,14 +123,14 @@ extern void __heap_check (struct heap_free_area *heap, const char *str);
 
 /* Delete the free-area FA from HEAP.  */
 static __inline__ void
-__heap_delete (struct heap_free_area *heap, struct heap_free_area *fa)
+__heap_delete (struct heap_free_area **heap, struct heap_free_area *fa)
 {
   if (fa->next)
     fa->next->prev = fa->prev;
   if (fa->prev)
     fa->prev->next = fa->next;
   else
-    heap = fa->next;
+    *heap = fa->next;
 }
 
 
@@ -138,7 +138,7 @@ __heap_delete (struct heap_free_area *heap, struct heap_free_area *fa)
    HEAP.  PREV and NEXT may be 0; if PREV is 0, FA is installed as
    the first free-area.  */
 static __inline__ void
-__heap_link_free_area (struct heap_free_area *heap, struct heap_free_area *fa,
+__heap_link_free_area (struct heap_free_area **heap, struct heap_free_area *fa,
		       struct heap_free_area *prev,
		       struct heap_free_area *next)
 {
@@ -148,7 +148,7 @@ __heap_link_free_area (struct heap_free_area *heap, struct heap_free_area *fa,
   if (prev)
     prev->next = fa;
   else
-    heap = fa;
+    *heap = fa;
   if (next)
     next->prev = fa;
 }
@@ -157,14 +157,14 @@ __heap_link_free_area (struct heap_free_area *heap, struct heap_free_area *fa,
    PREV may be 0, in which case FA is installed as the first free-area (but
    FA may not be 0).  */
 static __inline__ void
-__heap_link_free_area_after (struct heap_free_area *heap,
+__heap_link_free_area_after (struct heap_free_area **heap,
			     struct heap_free_area *fa,
			     struct heap_free_area *prev)
 {
   if (prev)
     prev->next = fa;
   else
-    heap = fa;
+    *heap = fa;
   fa->prev = prev;
 }
 
@@ -173,7 +173,7 @@ __heap_link_free_area_after (struct heap_free_area *heap,
    PREV and NEXT may be 0; if PREV is 0, MEM is installed as the first
    free-area.  */
 static __inline__ struct heap_free_area *
-__heap_add_free_area (struct heap_free_area *heap, void *mem, size_t size,
+__heap_add_free_area (struct heap_free_area **heap, void *mem, size_t size,
		      struct heap_free_area *prev,
		      struct heap_free_area *next)
 {
@@ -191,7 +191,7 @@ __heap_add_free_area (struct heap_free_area *heap, void *mem, size_t size,
 /* Allocate SIZE bytes from the front of the free-area FA in HEAP, and
    return the amount actually allocated (which may be more than SIZE).  */
 static __inline__ size_t
-__heap_free_area_alloc (struct heap_free_area *heap,
+__heap_free_area_alloc (struct heap_free_area **heap,
			struct heap_free_area *fa, size_t size)
 {
   size_t fa_size = fa->size;
@@ -215,15 +215,15 @@ __heap_free_area_alloc (struct heap_free_area *heap,
 /* Allocate and return a block at least *SIZE bytes long from HEAP.
    *SIZE is adjusted to reflect the actual amount allocated (which may be
    greater than requested).  */
-extern void *__heap_alloc (struct heap_free_area *heap, size_t *size);
+extern void *__heap_alloc (struct heap_free_area **heap, size_t *size);
 
 /* Allocate SIZE bytes at address MEM in HEAP.  Return the actual size
    allocated, or 0 if we failed.  */
-extern size_t __heap_alloc_at (struct heap_free_area *heap, void *mem, size_t size);
+extern size_t __heap_alloc_at (struct heap_free_area **heap, void *mem, size_t size);
 
 /* Return the memory area MEM of size SIZE to HEAP.
    Returns the heap free area into which the memory was placed.  */
-extern struct heap_free_area *__heap_free (struct heap_free_area *heap,
+extern struct heap_free_area *__heap_free (struct heap_free_area **heap,
					   void *mem, size_t size);
 
 /* Return true if HEAP contains absolutely no memory.  */
diff --git a/libc/stdlib/malloc/heap_alloc.c b/libc/stdlib/malloc/heap_alloc.c
index cd52038d3..77b7d8560 100644
--- a/libc/stdlib/malloc/heap_alloc.c
+++ b/libc/stdlib/malloc/heap_alloc.c
@@ -20,7 +20,7 @@
    *SIZE is adjusted to reflect the actual amount allocated (which may be
    greater than requested).  */
 void *
-__heap_alloc (struct heap_free_area *heap, size_t *size)
+__heap_alloc (struct heap_free_area **heap, size_t *size)
 {
   struct heap_free_area *fa;
   size_t _size = *size;
@@ -33,10 +33,10 @@ __heap_alloc (struct heap_free_area *heap, size_t *size)
       we must make sure that every allocated block can hold one.  */
    _size = HEAP_ADJUST_SIZE (sizeof (struct heap_free_area));
 
-  HEAP_DEBUG (heap, "before __heap_alloc");
+  HEAP_DEBUG (*heap, "before __heap_alloc");
 
   /* Look for a free area that can contain _SIZE bytes.  */
-  for (fa = heap; fa; fa = fa->next)
+  for (fa = *heap; fa; fa = fa->next)
    if (fa->size >= _size)
      {
	/* Found one!  */
@@ -45,7 +45,7 @@ __heap_alloc (struct heap_free_area *heap, size_t *size)
	break;
      }
 
-  HEAP_DEBUG (heap, "after __heap_alloc");
+  HEAP_DEBUG (*heap, "after __heap_alloc");
 
   return mem;
 }
diff --git a/libc/stdlib/malloc/heap_alloc_at.c b/libc/stdlib/malloc/heap_alloc_at.c
index 4c071b9ef..45d68598a 100644
--- a/libc/stdlib/malloc/heap_alloc_at.c
+++ b/libc/stdlib/malloc/heap_alloc_at.c
@@ -19,17 +19,17 @@
 /* Allocate SIZE bytes at address MEM in HEAP.  Return the actual size
    allocated, or 0 if we failed.  */
 size_t
-__heap_alloc_at (struct heap_free_area *heap, void *mem, size_t size)
+__heap_alloc_at (struct heap_free_area **heap, void *mem, size_t size)
 {
   struct heap_free_area *fa;
   size_t alloced = 0;
 
   size = HEAP_ADJUST_SIZE (size);
 
-  HEAP_DEBUG (heap, "before __heap_alloc_at");
+  HEAP_DEBUG (*heap, "before __heap_alloc_at");
 
   /* Look for a free area that can contain SIZE bytes.  */
-  for (fa = heap; fa; fa = fa->next)
+  for (fa = *heap; fa; fa = fa->next)
    {
      void *fa_mem = HEAP_FREE_AREA_START (fa);
      if (fa_mem <= mem)
@@ -41,7 +41,7 @@ __heap_alloc_at (struct heap_free_area *heap, void *mem, size_t size)
	}
    }
 
-  HEAP_DEBUG (heap, "after __heap_alloc_at");
+  HEAP_DEBUG (*heap, "after __heap_alloc_at");
 
   return alloced;
 }
diff --git a/libc/stdlib/malloc/heap_free.c b/libc/stdlib/malloc/heap_free.c
index 3326bc691..15343c05a 100644
--- a/libc/stdlib/malloc/heap_free.c
+++ b/libc/stdlib/malloc/heap_free.c
@@ -18,12 +18,12 @@
 /* Return the block of memory at MEM, of size SIZE, to HEAP.  */
 struct heap_free_area *
-__heap_free (struct heap_free_area *heap, void *mem, size_t size)
+__heap_free (struct heap_free_area **heap, void *mem, size_t size)
 {
   struct heap_free_area *fa, *prev_fa;
   void *end = (char *)mem + size;
 
-  HEAP_DEBUG (heap, "before __heap_free");
+  HEAP_DEBUG (*heap, "before __heap_free");
 
   /* Find the right position in the free-list entry to place the new
      block.  This is the most speed critical loop in this malloc
      implementation:
@@ -32,7 +32,7 @@ __heap_free (struct heap_free_area *heap, void *mem, size_t size)
     in the free-list when it becomes fragmented and long.  [A better
     implemention would use a balanced tree or something for the free-list,
     though that bloats the code-size and complexity quite a bit.]  */
-  for (prev_fa = 0, fa = heap; fa; prev_fa = fa, fa = fa->next)
+  for (prev_fa = 0, fa = *heap; fa; prev_fa = fa, fa = fa->next)
    if (unlikely (HEAP_FREE_AREA_END (fa) >= mem))
      break;
 
@@ -83,7 +83,7 @@ __heap_free (struct heap_free_area *heap, void *mem, size_t size)
   /* Make the new block into a separate free-list entry.  */
   fa = __heap_add_free_area (heap, mem, size, prev_fa, fa);
 
-  HEAP_DEBUG (heap, "after __heap_free");
+  HEAP_DEBUG (*heap, "after __heap_free");
 
   return fa;
 }
diff --git a/libc/stdlib/malloc/malloc.c b/libc/stdlib/malloc/malloc.c
index d58d62d2d..af55cd328 100644
--- a/libc/stdlib/malloc/malloc.c
+++ b/libc/stdlib/malloc/malloc.c
@@ -53,12 +53,17 @@ malloc_mutex_t __malloc_mmb_heap_lock = PTHREAD_MUTEX_INITIALIZER;
 #endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
 
 
-static void *
 #ifdef HEAP_USE_LOCKING
-malloc_from_heap (size_t size, struct heap_free_area *heap, malloc_mutex_t *heap_lock)
+#define malloc_from_heap(size, heap, lck) __malloc_from_heap(size, heap, lck)
 #else
-malloc_from_heap (size_t size, struct heap_free_area *heap)
+#define malloc_from_heap(size, heap, lck) __malloc_from_heap(size, heap)
 #endif
+static void *
+__malloc_from_heap (size_t size, struct heap_free_area **heap
+#ifdef HEAP_USE_LOCKING
+	, malloc_mutex_t *heap_lock
+#endif
+	)
 {
   void *mem;
 
@@ -67,12 +72,12 @@ malloc_from_heap (size_t size, struct heap_free_area *heap)
   /* Include extra space to record the size of the allocated block.  */
   size += MALLOC_HEADER_SIZE;
 
-  __heap_do_lock (heap_lock);
+  __heap_lock (heap_lock);
 
   /* First try to get memory that's already in our heap.  */
   mem = __heap_alloc (heap, &size);
 
-  __heap_do_unlock (heap_lock);
+  __heap_unlock (heap_lock);
 
   if (unlikely (! mem))
     /* We couldn't allocate from the heap, so grab some more
@@ -132,11 +137,11 @@ malloc_from_heap (size_t size, struct heap_free_area *heap)
	  struct malloc_mmb *mmb, *prev_mmb, *new_mmb;
 #endif
 
-	  MALLOC_DEBUG (1, "adding system memroy to heap: 0x%lx - 0x%lx (%d bytes)",
+	  MALLOC_DEBUG (1, "adding system memory to heap: 0x%lx - 0x%lx (%d bytes)",
			(long)block, (long)block + block_size, block_size);
 
	  /* Get back the heap lock.  */
-	  __heap_do_lock (heap_lock);
+	  __heap_lock (heap_lock);
 
	  /* Put BLOCK into the heap.  */
	  __heap_free (heap, block, block_size);
@@ -146,7 +151,7 @@ malloc_from_heap (size_t size, struct heap_free_area *heap)
	  /* Try again to allocate.  */
	  mem = __heap_alloc (heap, &size);
 
-	  __heap_do_unlock (heap_lock);
+	  __heap_unlock (heap_lock);
 
 #if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
	  /* Insert a record of BLOCK in sorted order into the
@@ -158,11 +163,7 @@ malloc_from_heap (size_t size, struct heap_free_area *heap)
	    if (block < mmb->mem)
	      break;
 
-#ifdef HEAP_USE_LOCKING
-	  new_mmb = malloc_from_heap (sizeof *new_mmb, __malloc_mmb_heap, &__malloc_mmb_heap_lock);
-#else
-	  new_mmb = malloc_from_heap (sizeof *new_mmb, __malloc_mmb_heap);
-#endif
+	  new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
	  new_mmb->next = mmb;
	  new_mmb->mem = block;
	  new_mmb->size = block_size;
@@ -221,11 +222,7 @@ malloc (size_t size)
   if (unlikely(((unsigned long)size > (unsigned long)(MALLOC_HEADER_SIZE*-2))))
     goto oom;
 
-#ifdef HEAP_USE_LOCKING
-  mem = malloc_from_heap (size, __malloc_heap, &__malloc_heap_lock);
-#else
-  mem = malloc_from_heap (size, __malloc_heap);
-#endif
+  mem = malloc_from_heap (size, &__malloc_heap, &__malloc_heap_lock);
   if (unlikely (!mem))
     {
     oom:
diff --git a/libc/stdlib/malloc/memalign.c b/libc/stdlib/malloc/memalign.c
index 114299b43..6826d623a 100644
--- a/libc/stdlib/malloc/memalign.c
+++ b/libc/stdlib/malloc/memalign.c
@@ -36,7 +36,7 @@ memalign (size_t alignment, size_t size)
 {
   void *mem, *base;
   unsigned long tot_addr, tot_end_addr, addr, end_addr;
-  struct heap_free_area *heap = __malloc_heap;
+  struct heap_free_area **heap = &__malloc_heap;
 
   /* Make SIZE something we like.  */
   size = HEAP_ADJUST_SIZE (size);
diff --git a/libc/stdlib/malloc/realloc.c b/libc/stdlib/malloc/realloc.c
index f12123aa9..a8271995b 100644
--- a/libc/stdlib/malloc/realloc.c
+++ b/libc/stdlib/malloc/realloc.c
@@ -59,9 +59,9 @@ realloc (void *mem, size_t new_size)
     {
       size_t extra = new_size - size;
 
-      __heap_do_lock (&__malloc_heap_lock);
-      extra = __heap_alloc_at (__malloc_heap, base_mem + size, extra);
-      __heap_do_unlock (&__malloc_heap_lock);
+      __heap_lock (&__malloc_heap_lock);
+      extra = __heap_alloc_at (&__malloc_heap, base_mem + size, extra);
+      __heap_unlock (&__malloc_heap_lock);
 
       if (extra)
	/* Record the changed size.  */
@@ -82,9 +82,9 @@ realloc (void *mem, size_t new_size)
   else if (new_size + MALLOC_REALLOC_MIN_FREE_SIZE <= size)
     /* Shrink the block.  */
     {
-      __heap_do_lock (&__malloc_heap_lock);
-      __heap_free (__malloc_heap, base_mem + new_size, size - new_size);
-      __heap_do_unlock (&__malloc_heap_lock);
+      __heap_lock (&__malloc_heap_lock);
+      __heap_free (&__malloc_heap, base_mem + new_size, size - new_size);
+      __heap_unlock (&__malloc_heap_lock);
 
       MALLOC_SET_SIZE (base_mem, new_size);
     }
```
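The other half of the patch is the call-site cleanup: instead of `#ifdef`-ing every caller (as the old `free()` and `malloc()` did), `free_to_heap()` and `malloc_from_heap()` become macros that keep or drop the lock argument depending on `HEAP_USE_LOCKING`, so every caller is written one way. A sketch of the same trick under assumed names (`WITH_LOCKING`, `release()`, not the uClibc identifiers):

```c
/* Sketch of the argument-dropping macro pattern used in this patch.
   All names here are hypothetical stand-ins for the uClibc ones. */
#include <stdio.h>

#define WITH_LOCKING 1                  /* stand-in for HEAP_USE_LOCKING */

#ifdef WITH_LOCKING
typedef int mutex_t;                    /* stand-in for malloc_mutex_t */
static void mutex_lock (mutex_t *m)   { (void) m; puts ("locked"); }
static void mutex_unlock (mutex_t *m) { (void) m; puts ("unlocked"); }
# define my_lock(l)   mutex_lock (l)
# define my_unlock(l) mutex_unlock (l)
# define release(mem, heap, lck) __release (mem, heap, lck)
#else
# define my_lock(l)                     /* expands to nothing, so the */
# define my_unlock(l)                   /* lock argument need not exist */
# define release(mem, heap, lck) __release (mem, heap)
#endif

static void
__release (void *mem, void **heap
#ifdef WITH_LOCKING
	   , mutex_t *heap_lock
#endif
	   )
{
  my_lock (heap_lock);          /* valid even when heap_lock is absent */
  *heap = mem;                  /* toy "free": record the block */
  my_unlock (heap_lock);
}

int main (void)
{
  void *heap = NULL;
#ifdef WITH_LOCKING
  mutex_t heap_lock = 0;
#endif
  int block;

  /* The call is identical either way; when locking is compiled out,
     the macro silently discards the lock argument. */
  release (&block, &heap, &heap_lock);
  return 0;
}
```

This keeps the single-threaded build free of any locking overhead while avoiding the duplicated, easy-to-desynchronize `#ifdef` branches that caused the original breakage.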