-rw-r--r-- | libc/stdlib/malloc/free.c | 190 |
1 file changed, 95 insertions(+), 95 deletions(-)
diff --git a/libc/stdlib/malloc/free.c b/libc/stdlib/malloc/free.c
index 6908e16c1..32e1f766b 100644
--- a/libc/stdlib/malloc/free.c
+++ b/libc/stdlib/malloc/free.c
@@ -22,121 +22,121 @@ void
 free (void *mem)
 {
-  if (mem)
-    {
-      size_t size;
-      struct heap_free_area *fa;
-      struct heap *heap = &__malloc_heap;
+  size_t size;
+  struct heap_free_area *fa;
+  struct heap *heap = &__malloc_heap;
 
-      size = MALLOC_SIZE (mem);
-      mem = MALLOC_BASE (mem);
+  if (unlikely (! mem))
+    return;
 
-      MALLOC_DEBUG ("free: 0x%lx (base = 0x%lx, total_size = %d)\n",
-                    (long)MALLOC_ADDR (mem), (long)mem, size);
+  size = MALLOC_SIZE (mem);
+  mem = MALLOC_BASE (mem);
 
-      __malloc_lock ();
+  MALLOC_DEBUG ("free: 0x%lx (base = 0x%lx, total_size = %d)\n",
+                (long)MALLOC_ADDR (mem), (long)mem, size);
 
-      /* Put MEM back in the heap, and get the free-area it was placed in.  */
-      fa = __heap_free (heap, mem, size);
+  __malloc_lock ();
 
-      /* See if the free-area FA has grown big enough that it should be
-         unmapped.  */
-      if (HEAP_FREE_AREA_SIZE (fa) < MALLOC_UNMAP_THRESHOLD)
-        /* Nope, nothing left to do, just release the lock.  */
-        __malloc_unlock ();
-      else
-        /* Yup, try to unmap FA.  */
-        {
-          unsigned long start = (unsigned long)HEAP_FREE_AREA_START (fa);
-          unsigned long end = (unsigned long)HEAP_FREE_AREA_END (fa);
+  /* Put MEM back in the heap, and get the free-area it was placed in.  */
+  fa = __heap_free (heap, mem, size);
+
+  /* See if the free-area FA has grown big enough that it should be
+     unmapped.  */
+  if (HEAP_FREE_AREA_SIZE (fa) < MALLOC_UNMAP_THRESHOLD)
+    /* Nope, nothing left to do, just release the lock.  */
+    __malloc_unlock ();
+  else
+    /* Yup, try to unmap FA.  */
+    {
+      unsigned long start = (unsigned long)HEAP_FREE_AREA_START (fa);
+      unsigned long end = (unsigned long)HEAP_FREE_AREA_END (fa);
 #ifndef MALLOC_USE_SBRK
-          unsigned long unmap_start, unmap_end;
+      unsigned long unmap_start, unmap_end;
 #endif
 
 #ifdef MALLOC_USE_SBRK
-          /* Get the sbrk lock so that the two possible calls to sbrk below
-             are guaranteed to be contiguous.  */
-          __malloc_lock_sbrk ();
-          /* When using sbrk, we only shrink the heap from the end.  It would
-             be possible to allow _both_ -- shrinking via sbrk when possible,
-             and otherwise shrinking via munmap, but this results in holes in
-             memory that prevent the brk from every growing back down; since
-             we only ever grow the heap via sbrk, this tends to produce a
-             continuously growing brk (though the actual memory is unmapped),
-             which could eventually run out of address space.  Note that
-             `sbrk(0)' shouldn't normally do a system call, so this test is
-             reasonably cheap.  */
-          if ((void *)end != sbrk (0))
-            {
-              MALLOC_DEBUG ("  not unmapping: 0x%lx - 0x%lx (%ld bytes)\n",
-                            start, end, end - start);
-              __malloc_unlock_sbrk ();
-              __malloc_unlock ();
-              return;
-            }
+      /* Get the sbrk lock so that the two possible calls to sbrk below
+         are guaranteed to be contiguous.  */
+      __malloc_lock_sbrk ();
+      /* When using sbrk, we only shrink the heap from the end.  It would
+         be possible to allow _both_ -- shrinking via sbrk when possible,
+         and otherwise shrinking via munmap, but this results in holes in
+         memory that prevent the brk from every growing back down; since
+         we only ever grow the heap via sbrk, this tends to produce a
+         continuously growing brk (though the actual memory is unmapped),
+         which could eventually run out of address space.  Note that
+         `sbrk(0)' shouldn't normally do a system call, so this test is
+         reasonably cheap.  */
+      if ((void *)end != sbrk (0))
+        {
+          MALLOC_DEBUG ("  not unmapping: 0x%lx - 0x%lx (%ld bytes)\n",
+                        start, end, end - start);
+          __malloc_unlock_sbrk ();
+          __malloc_unlock ();
+          return;
+        }
 #endif
 
-          MALLOC_DEBUG ("  unmapping: 0x%lx - 0x%lx (%ld bytes)\n",
-                        start, end, end - start);
+      MALLOC_DEBUG ("  unmapping: 0x%lx - 0x%lx (%ld bytes)\n",
+                    start, end, end - start);
+
+      /* Remove FA from the heap.  */
+      __heap_unlink_free_area (heap, fa);
 
-          /* Remove FA from the heap.  */
-          __heap_unlink_free_area (heap, fa);
-
-          if (!fa->next && !fa->prev)
-            /* We want to avoid the heap from losing all memory, so reserve
-               a bit.  This test is only a heuristic -- the existance of
-               another free area, even if it's smaller than
-               MALLOC_MIN_SIZE, will cause us not to reserve anything.  */
-            {
-              /* Put the reserved memory back in the heap; we asssume that
-                 MALLOC_UNMAP_THRESHOLD is greater than MALLOC_MIN_SIZE, so
-                 we use the latter unconditionally here.  */
-              __heap_free (heap, (void *)start, MALLOC_MIN_SIZE);
-              start += MALLOC_MIN_SIZE;
-            }
+      if (!fa->next && !fa->prev)
+        /* We want to avoid the heap from losing all memory, so reserve
+           a bit.  This test is only a heuristic -- the existance of
+           another free area, even if it's smaller than
+           MALLOC_MIN_SIZE, will cause us not to reserve anything.  */
+        {
+          /* Put the reserved memory back in the heap; we asssume that
+             MALLOC_UNMAP_THRESHOLD is greater than MALLOC_MIN_SIZE, so
+             we use the latter unconditionally here.  */
+          __heap_free (heap, (void *)start, MALLOC_MIN_SIZE);
+          start += MALLOC_MIN_SIZE;
+        }
 
 #ifdef MALLOC_USE_SBRK
-          /* Release the main lock; we're still holding the sbrk lock.  */
-          __malloc_unlock ();
-          /* Lower the brk.  */
-          sbrk (start - end);
-          /* Release the sbrk lock too; now we hold no locks.  */
-          __malloc_unlock_sbrk ();
+      /* Release the main lock; we're still holding the sbrk lock.  */
+      __malloc_unlock ();
+      /* Lower the brk.  */
+      sbrk (start - end);
+      /* Release the sbrk lock too; now we hold no locks.  */
+      __malloc_unlock_sbrk ();
 #else /* !MALLOC_USE_SBRK */
-          /* MEM/LEN may not be page-aligned, so we have to page-align them,
-             and return any left-over bits on the end to the heap.  */
-          unmap_start = MALLOC_ROUND_UP_TO_PAGE_SIZE (start);
-          unmap_end = MALLOC_ROUND_DOWN_TO_PAGE_SIZE (end);
-
-          /* We have to be careful that any left-over bits are large enough to
-             return.  Note that we _don't check_ to make sure there's room to
-             grow/shrink the start/end by another page, we just assume that
-             the unmap threshold is high enough so that this is always safe
-             (i.e., it should probably be at least 3 pages).  */
-          if (unmap_start > start)
-            {
-              if (unmap_start - start < HEAP_MIN_FREE_AREA_SIZE)
-                unmap_start += MALLOC_PAGE_SIZE;
-              __heap_free (heap, (void *)start, unmap_start - start);
-            }
-          if (end > unmap_end)
-            {
-              if (end - unmap_end < HEAP_MIN_FREE_AREA_SIZE)
-                unmap_end -= MALLOC_PAGE_SIZE;
-              __heap_free (heap, (void *)unmap_end, end - unmap_end);
-            }
-
-          /* Release the malloc lock before we do the system call.  */
-          __malloc_unlock ();
+      /* MEM/LEN may not be page-aligned, so we have to page-align them,
+         and return any left-over bits on the end to the heap.  */
+      unmap_start = MALLOC_ROUND_UP_TO_PAGE_SIZE (start);
+      unmap_end = MALLOC_ROUND_DOWN_TO_PAGE_SIZE (end);
+
+      /* We have to be careful that any left-over bits are large enough to
+         return.  Note that we _don't check_ to make sure there's room to
+         grow/shrink the start/end by another page, we just assume that
+         the unmap threshold is high enough so that this is always safe
+         (i.e., it should probably be at least 3 pages).  */
+      if (unmap_start > start)
+        {
+          if (unmap_start - start < HEAP_MIN_FREE_AREA_SIZE)
+            unmap_start += MALLOC_PAGE_SIZE;
+          __heap_free (heap, (void *)start, unmap_start - start);
+        }
+      if (end > unmap_end)
+        {
+          if (end - unmap_end < HEAP_MIN_FREE_AREA_SIZE)
+            unmap_end -= MALLOC_PAGE_SIZE;
+          __heap_free (heap, (void *)unmap_end, end - unmap_end);
+        }
 
-          if (unmap_end > unmap_start)
-            /* Finally, actually unmap the memory.  */
-            munmap ((void *)unmap_start, unmap_end - unmap_start);
+      /* Release the malloc lock before we do the system call.  */
+      __malloc_unlock ();
+
+      if (unmap_end > unmap_start)
+        /* Finally, actually unmap the memory.  */
+        munmap ((void *)unmap_start, unmap_end - unmap_start);
 #endif /* MALLOC_USE_SBRK */
-        }
     }
 }
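Note: the only behavioural content of this patch is the inverted NULL test -- instead of nesting the whole body inside "if (mem)", the new code returns early, with the test marked as unlikely; everything else is re-indentation. A minimal standalone sketch of that pattern (the "unlikely" macro shown here is the usual GCC __builtin_expect idiom, assumed for illustration; it is not quoted from uClibc's headers, and the function names are hypothetical):

    #include <stdio.h>

    /* Assumed definition for illustration: hint to the compiler that the
       condition is almost always false, so the early-return path is laid
       out as the cold branch.  */
    #define unlikely(cond) __builtin_expect ((cond) != 0, 0)

    static void
    release (void *mem)
    {
      /* Early return keeps the common path at one indentation level,
         mirroring the shape free() takes after this patch.  */
      if (unlikely (! mem))
        return;

      printf ("releasing %p\n", mem);   /* stand-in for the real free logic */
    }

    int
    main (void)
    {
      int x;
      release (NULL);   /* hits the early return */
      release (&x);     /* takes the common path */
      return 0;
    }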
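Note: in the mmap configuration the (unchanged) unmapping logic trims the free area to page boundaries before munmap: the start is rounded up, the end rounded down, and any sub-page leftovers are handed back to the heap, giving up one extra page whenever a leftover would be smaller than HEAP_MIN_FREE_AREA_SIZE. A small standalone illustration of just that arithmetic (PAGE_SZ, MIN_FREE and the rounding macros are stand-ins, not uClibc's MALLOC_PAGE_SIZE / HEAP_MIN_FREE_AREA_SIZE definitions, and the real code also calls __heap_free on the kept fragments):

    #include <stdio.h>

    #define PAGE_SZ   4096UL
    #define MIN_FREE  32UL    /* assumed minimum useful free-area size */

    #define ROUND_UP_TO_PAGE(a)    (((a) + PAGE_SZ - 1) & ~(PAGE_SZ - 1))
    #define ROUND_DOWN_TO_PAGE(a)  ((a) & ~(PAGE_SZ - 1))

    int
    main (void)
    {
      unsigned long start = 0x10010, end = 0x15ff0;   /* arbitrary example area */
      unsigned long unmap_start = ROUND_UP_TO_PAGE (start);
      unsigned long unmap_end = ROUND_DOWN_TO_PAGE (end);

      /* A head or tail fragment too small to be a usable free area costs
         one page of unmapping instead.  */
      if (unmap_start > start && unmap_start - start < MIN_FREE)
        unmap_start += PAGE_SZ;
      if (end > unmap_end && end - unmap_end < MIN_FREE)
        unmap_end -= PAGE_SZ;

      printf ("keep head %#lx..%#lx, unmap %#lx..%#lx, keep tail %#lx..%#lx\n",
              start, unmap_start, unmap_start, unmap_end, unmap_end, end);
      return 0;
    }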