| author    | Freeman Wang <xwang@ubicom.com>       | 2009-12-19 13:43:00 -0800 |
|-----------|---------------------------------------|---------------------------|
| committer | Austin Foxley <austinf@cetoncorp.com> | 2009-12-19 13:53:32 -0800 |
| commit    | 23528282b771d1af3df0fa17f1e909ad3b663f59 | |
| tree      | b51c4b640cf7f22708e027739e322dc573a3d430 /libc/stdlib/malloc/malloc.c | |
| parent    | 7dcd83e8bded560b6a2c658889995390d3021a92 | |
malloc: fix race condition and other bugs in the no-mmu malloc
Fixes multiple race conditions on the mmb list. This is done by
making mmb_heap_lock a recursive lock and extending the regular
heap_lock to cover the mmb heap handling as well.
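For context, the lock must be recursive because recording a newly mmapped block can re-enter malloc_from_heap() on the mmb heap while __malloc_mmb_heap_lock is already held by the same thread. Below is a minimal standalone pthreads sketch (illustrative only, not uClibc code) of the property the patch relies on; it uses the same GNU initializer the patch switches to:

```c
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>

/* Same initializer the patch uses for __malloc_mmb_heap_lock
   (a GNU extension, hence _GNU_SOURCE).  */
static pthread_mutex_t lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

/* Stands in for the re-entry that happens when growing the mmb heap
   calls back into malloc_from_heap() with the lock already held.  */
static void reenter (void)
{
  pthread_mutex_lock (&lock);   /* second acquisition by the owner: fine
                                   for a recursive mutex, deadlock for a
                                   normal one */
  puts ("re-entered while holding the lock");
  pthread_mutex_unlock (&lock);
}

int main (void)
{
  pthread_mutex_lock (&lock);   /* first acquisition */
  reenter ();
  pthread_mutex_unlock (&lock);
  return 0;
}
```

Built with `gcc -pthread`, this runs to completion; with PTHREAD_MUTEX_INITIALIZER instead, the second lock would deadlock.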
Also moves the new_mmb allocation up, before the mmb list is walked
to find the insertion point. Previously, when the mmb heap itself ran
out and had to be extended just after the regular heap was extended,
the allocation could insert into the mmb list behind the walker's
back, leaving the saved insertion point stale and corrupting the list.
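To make the ordering hazard concrete, here is a minimal standalone C sketch (hypothetical names, deliberately simplified; not the real struct malloc_mmb machinery): a sorted list whose allocator can itself insert into the same list, which is exactly the side effect that made a search-before-allocate order unsafe.

```c
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for struct malloc_mmb and
   __malloc_mmapped_blocks; not the uClibc definitions.  */
struct mmb { struct mmb *next; unsigned long mem; };

static struct mmb *blocks;      /* kept sorted by mem */

static void insert_sorted (struct mmb *n)
{
  struct mmb **p = &blocks;
  while (*p && (*p)->mem < n->mem)
    p = &(*p)->next;
  n->next = *p;
  *p = n;
}

/* Stands in for malloc_from_heap() on the mmb heap: on the first call
   the "heap grows", and the newly mapped block is itself recorded in
   the same sorted list as a side effect.  */
static struct mmb *alloc_mmb (void)
{
  static int must_grow = 1;
  if (must_grow--)
    {
      struct mmb *side = malloc (sizeof *side);
      side->mem = 0x2000;       /* lands in the middle of the list */
      insert_sorted (side);
    }
  return malloc (sizeof (struct mmb));
}

int main (void)
{
  struct mmb a = { 0, 0x1000 }, b = { 0, 0x3000 };
  insert_sorted (&a);
  insert_sorted (&b);

  /* Fixed ordering: allocate first, then search for the insertion
     point.  Searching first and allocating afterwards (the old code)
     would leave the saved prev/next pair stale once alloc_mmb()
     inserted the 0x2000 node behind our back.  */
  struct mmb *n = alloc_mmb ();
  n->mem = 0x2800;
  insert_sorted (n);

  for (struct mmb *m = blocks; m; m = m->next)
    printf ("%#lx\n", m->mem);  /* 0x1000 0x2000 0x2800 0x3000 */
  return 0;
}
```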
Signed-off-by: Freeman Wang <xwang@ubicom.com>
Signed-off-by: Austin Foxley <austinf@cetoncorp.com>
Diffstat (limited to 'libc/stdlib/malloc/malloc.c')
-rw-r--r-- | libc/stdlib/malloc/malloc.c | 7 |
1 file changed, 4 insertions, 3 deletions
```diff
diff --git a/libc/stdlib/malloc/malloc.c b/libc/stdlib/malloc/malloc.c
index 337206f09..d58a7d0ee 100644
--- a/libc/stdlib/malloc/malloc.c
+++ b/libc/stdlib/malloc/malloc.c
@@ -46,7 +46,7 @@ struct malloc_mmb *__malloc_mmapped_blocks = 0;
 HEAP_DECLARE_STATIC_FREE_AREA (initial_mmb_fa, 48); /* enough for 3 mmbs */
 struct heap_free_area *__malloc_mmb_heap = HEAP_INIT_WITH_FA (initial_mmb_fa);
 #ifdef HEAP_USE_LOCKING
-malloc_mutex_t __malloc_mmb_heap_lock = PTHREAD_MUTEX_INITIALIZER;
+malloc_mutex_t __malloc_mmb_heap_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
 #endif
 #endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
 
@@ -149,19 +149,19 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap
 	  /* Try again to allocate.  */
 	  mem = __heap_alloc (heap, &size);
 
-	  __heap_unlock (heap_lock);
 
 #if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
 	  /* Insert a record of BLOCK in sorted order into the
 	     __malloc_mmapped_blocks list.  */
 
+	  new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
+
 	  for (prev_mmb = 0, mmb = __malloc_mmapped_blocks;
 	       mmb;
 	       prev_mmb = mmb, mmb = mmb->next)
 	    if (block < mmb->mem)
 	      break;
 
-	  new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
 	  new_mmb->next = mmb;
 	  new_mmb->mem = block;
 	  new_mmb->size = block_size;
@@ -175,6 +175,7 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap
 		  (unsigned)new_mmb, (unsigned)new_mmb->mem, block_size);
 #endif /* !MALLOC_USE_SBRK && __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
 
+	  __heap_unlock (heap_lock);
 	}
 
 }
```
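The last hunk is the heart of the heap_lock change: the __heap_unlock (heap_lock) call that used to run immediately after __heap_alloc now runs only after the mmb record has been allocated and linked into __malloc_mmapped_blocks, so the regular heap lock is held across the entire list update. Together with the now-recursive __malloc_mmb_heap_lock, this closes the window in which another thread could observe or modify a half-updated mmb list.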