author     Miles Bader <miles@lsi.nec.co.jp>    2002-10-15 02:15:16 +0000
committer  Miles Bader <miles@lsi.nec.co.jp>    2002-10-15 02:15:16 +0000
commit     6882d43363b5411a16657378b2f8110988dd9660 (patch)
tree       9ece05cc9c4ae924d012f723b9e20e5eb0f8f396 /libc/stdlib/malloc
parent     a1d020f74e0702791d2d4cbad5a69bcc2adfecfc (diff)
Fix locking to not deadlock when __UCLIBC_UCLINUX_BROKEN_MUNMAP__ is defined.
Diffstat (limited to 'libc/stdlib/malloc')
-rw-r--r--  libc/stdlib/malloc/free.c    15
-rw-r--r--  libc/stdlib/malloc/malloc.c  51
2 files changed, 32 insertions(+), 34 deletions(-)
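
With __UCLIBC_UCLINUX_BROKEN_MUNMAP__ defined, malloc tracks every mmapped block in the __malloc_mmapped_blocks list, and the struct malloc_mmb descriptors on that list live in a separate heap, __malloc_mmb_heap. Before this patch, free_to_heap() and malloc_from_heap() could recurse into that descriptor heap while still holding a heap lock; when the heap being worked on was the mmb heap itself, the recursion tried to take a non-recursive lock the thread already held, and the program deadlocked. The following minimal sketch (hypothetical heap_t/heap_free names, not the uClibc internals) isolates that pattern, using pthread_mutex_trylock so it reports the would-be deadlock instead of hanging:

#include <pthread.h>
#include <stdio.h>

typedef struct heap { pthread_mutex_t lock; } heap_t;

static heap_t mmb_heap = { PTHREAD_MUTEX_INITIALIZER };

static void heap_free (heap_t *heap, int depth)
{
	if (pthread_mutex_trylock (&heap->lock) != 0)
	{
		/* A plain (non-recursive) mutex is already held by this
		   thread; the real code would block here forever.  */
		printf ("would deadlock at recursion depth %d\n", depth);
		return;
	}
	if (depth == 0)
		/* Old behaviour: recurse into the same heap while its lock
		   is still held, e.g. freeing an mmb descriptor that lives
		   in the heap being freed from.  */
		heap_free (heap, depth + 1);
	pthread_mutex_unlock (&heap->lock);
}

int main (void)
{
	heap_free (&mmb_heap, 0);
	return 0;
}

Both file changes below remove this pattern by dropping the heap lock before any call that can recurse into a heap, and by not holding it across the munmap/mmap/sbrk system calls.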
diff --git a/libc/stdlib/malloc/free.c b/libc/stdlib/malloc/free.c
index 6c3211360..a00f996d9 100644
--- a/libc/stdlib/malloc/free.c
+++ b/libc/stdlib/malloc/free.c
@@ -105,7 +105,7 @@ free_to_heap (void *mem, struct heap *heap)
#ifdef MALLOC_USE_SBRK
- /* Release the main lock; we're still holding the sbrk lock. */
+ /* Release the heap lock; we're still holding the sbrk lock. */
__heap_unlock (heap);
/* Lower the brk. */
sbrk (start - end);
@@ -161,16 +161,21 @@ free_to_heap (void *mem, struct heap *heap)
else
__malloc_mmapped_blocks = next_mmb;
+ /* Start searching again from the end of this block. */
+ start = mmb_end;
+
+ /* We have to unlock the heap before we recurse to free the mmb
+ descriptor, because we might be unmapping from the mmb
+ heap. */
+ __heap_unlock (heap);
+
/* Release the descriptor block we used. */
free_to_heap (mmb, &__malloc_mmb_heap);
/* Do the actual munmap. */
- __heap_unlock (heap);
munmap ((void *)mmb_start, mmb_end - mmb_start);
- __heap_lock (heap);
- /* Start searching again from the end of that block. */
- start = mmb_end;
+ __heap_lock (heap);
# ifdef __UCLIBC_HAS_THREADS__
/* In a multi-threaded program, it's possible that PREV_MMB has
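
The free.c hunks reorder the unmap path so the heap lock is released before anything that can recurse: remember where to resume scanning (start = mmb_end), unlock the heap, recursively free the descriptor, do the munmap, and only then re-take the lock. Below is a hedged sketch of that ordering with standard pthread/mmap calls standing in for the uClibc internals; free_mmb_descriptor is a hypothetical stand-in for free_to_heap (mmb, &__malloc_mmb_heap):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

static pthread_mutex_t heap_lock = PTHREAD_MUTEX_INITIALIZER;

static void free_mmb_descriptor (void *mmb)
{
	/* May lock the descriptor heap itself; safe because the caller has
	   already dropped its heap lock, even if both heaps (and so both
	   locks) turn out to be the same.  */
	pthread_mutex_lock (&heap_lock);
	free (mmb);
	pthread_mutex_unlock (&heap_lock);
}

int main (void)
{
	size_t len = 4096;
	void *block = mmap (NULL, len, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	void *mmb = malloc (16);		/* stand-in descriptor */

	if (block == MAP_FAILED || !mmb)
		return 1;

	pthread_mutex_lock (&heap_lock);	/* scan the heap under the lock  */
	pthread_mutex_unlock (&heap_lock);	/* drop it before recursing...   */
	free_mmb_descriptor (mmb);		/* ...and before the syscall     */
	munmap (block, len);
	pthread_mutex_lock (&heap_lock);	/* re-take it to resume the scan */
	pthread_mutex_unlock (&heap_lock);

	puts ("freed without self-deadlock");
	return 0;
}

Because the caller's lock is already released before the recursive free, it no longer matters whether the descriptor lives in the same heap that was being scanned.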
diff --git a/libc/stdlib/malloc/malloc.c b/libc/stdlib/malloc/malloc.c
index f6dd3099d..25fda4115 100644
--- a/libc/stdlib/malloc/malloc.c
+++ b/libc/stdlib/malloc/malloc.c
@@ -66,6 +66,8 @@ malloc_from_heap (size_t size, struct heap *heap)
/* First try to get memory that's already in our heap. */
mem = __heap_alloc (heap, &size);
+ __heap_unlock (heap);
+
if (unlikely (! mem))
/* We couldn't allocate from the heap, so grab some more
from the system, add it to the heap, and try again. */
@@ -78,19 +80,11 @@ malloc_from_heap (size_t size, struct heap *heap)
? MALLOC_HEAP_EXTEND_SIZE
: MALLOC_ROUND_UP_TO_PAGE_SIZE (size));
-#ifdef MALLOC_USE_SBRK
- /* Get the sbrk lock while we've still got the heap lock. */
- __malloc_lock_sbrk ();
-#endif
-
- /* Don't hold the heap lock during the syscall, so that small
- allocations in a different thread may succeed while we're
- blocked. */
- __heap_unlock (heap);
-
/* Allocate the new heap block. */
#ifdef MALLOC_USE_SBRK
+ __malloc_lock_sbrk ();
+
/* Use sbrk if we can, as it's faster than mmap, and guarantees
contiguous allocation. */
block = sbrk (block_size);
@@ -110,6 +104,7 @@ malloc_from_heap (size_t size, struct heap *heap)
block = (void *)aligned_block;
}
}
+
__malloc_unlock_sbrk ();
#else /* !MALLOC_USE_SBRK */
@@ -120,22 +115,13 @@ malloc_from_heap (size_t size, struct heap *heap)
#endif /* MALLOC_USE_SBRK */
- /* Get back the heap lock. */
- __heap_lock (heap);
-
if (likely (block != (void *)-1))
{
#if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
- struct malloc_mmb *mmb, *prev_mmb, *new_mmb;
-#endif /* !MALLOC_USE_SBRK && __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
-
- MALLOC_DEBUG (" adding memory: 0x%lx - 0x%lx (%d bytes)\n",
- (long)block, (long)block + block_size, block_size);
-
- /* Put BLOCK into the heap. */
- __heap_free (heap, block, block_size);
+ struct malloc_mmb *mmb, *prev_mmb;
+ struct malloc_mmb *new_mmb
+ = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap);
-#if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
/* Insert a record of this allocation in sorted order into the
__malloc_mmapped_blocks list. */
@@ -145,29 +131,36 @@ malloc_from_heap (size_t size, struct heap *heap)
if (block < mmb->mem)
break;
- new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap);
new_mmb->next = mmb;
new_mmb->mem = block;
new_mmb->size = block_size;
- MALLOC_MMB_DEBUG (" new mmb at 0x%x: 0x%x[%d]\n",
- (unsigned)new_mmb,
- (unsigned)new_mmb->mem, block_size);
-
if (prev_mmb)
prev_mmb->next = new_mmb;
else
__malloc_mmapped_blocks = new_mmb;
+ MALLOC_MMB_DEBUG (" new mmb at 0x%x: 0x%x[%d]\n",
+ (unsigned)new_mmb,
+ (unsigned)new_mmb->mem, block_size);
#endif /* !MALLOC_USE_SBRK && __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
+ MALLOC_DEBUG (" adding memory: 0x%lx - 0x%lx (%d bytes)\n",
+ (long)block, (long)block + block_size, block_size);
+
+ /* Get back the heap lock. */
+ __heap_lock (heap);
+
+ /* Put BLOCK into the heap. */
+ __heap_free (heap, block, block_size);
+
/* Try again to allocate. */
mem = __heap_alloc (heap, &size);
+
+ __heap_unlock (heap);
}
}
- __heap_unlock (heap);
-
if (likely (mem))
/* Record the size of the block and get the user address. */
{
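
The malloc.c side applies the same discipline to the growth path: the sbrk/mmap call and the recursive malloc_from_heap() of the new block's descriptor now run with no heap lock held, and the lock is taken only around the __heap_free()/__heap_alloc() pair that actually updates the heap. A rough sketch of that shape, with hypothetical alloc_descriptor/heap_insert helpers standing in for the uClibc routines:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

static pthread_mutex_t heap_lock = PTHREAD_MUTEX_INITIALIZER;

struct mmb { void *mem; size_t size; struct mmb *next; };

static struct mmb *alloc_descriptor (void)
{
	/* In uClibc this recurses into malloc_from_heap() on
	   __malloc_mmb_heap; running it before the heap lock is taken means
	   the recursion can never block on a lock we already hold.  */
	return malloc (sizeof (struct mmb));
}

static void heap_insert (struct mmb *mmb, void *block, size_t size)
{
	/* Stand-in for the __heap_free()/__heap_alloc() bookkeeping; only
	   this part needs the heap lock.  */
	mmb->mem = block;
	mmb->size = size;
	mmb->next = NULL;
}

int main (void)
{
	size_t block_size = 4096;

	/* 1. System call with no lock held, so other threads can keep
	      allocating from memory already in the heap.  */
	void *block = mmap (NULL, block_size, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (block == MAP_FAILED)
		return 1;

	/* 2. Recursive descriptor allocation, also before locking.  */
	struct mmb *mmb = alloc_descriptor ();
	if (!mmb)
		return 1;

	/* 3. Short critical section: just the heap-structure update.  */
	pthread_mutex_lock (&heap_lock);
	heap_insert (mmb, block, block_size);
	pthread_mutex_unlock (&heap_lock);

	printf ("added %zu bytes at %p\n", block_size, block);
	free (mmb);
	munmap (block, block_size);
	return 0;
}

This keeps the property the removed comment described (no heap lock held across the syscall, so small allocations in other threads can still succeed) while also keeping the mmb-heap recursion outside any held lock.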